Merge SP1A.201015.001
Change-Id: I2954affa7f3d2d2adfb76bb3ec5b433c7a2e6949
diff --git a/darwin-x86/bin/cxx_extractor b/darwin-x86/bin/cxx_extractor
index e63d043..cfc5f02 100755
--- a/darwin-x86/bin/cxx_extractor
+++ b/darwin-x86/bin/cxx_extractor
Binary files differ
diff --git a/darwin-x86/bin/header-abi-diff b/darwin-x86/bin/header-abi-diff
index 70c4f92..dd35fc7 100755
--- a/darwin-x86/bin/header-abi-diff
+++ b/darwin-x86/bin/header-abi-diff
Binary files differ
diff --git a/darwin-x86/bin/header-abi-dumper b/darwin-x86/bin/header-abi-dumper
index e3b5dd0..ae81974 100755
--- a/darwin-x86/bin/header-abi-dumper
+++ b/darwin-x86/bin/header-abi-dumper
Binary files differ
diff --git a/darwin-x86/bin/header-abi-linker b/darwin-x86/bin/header-abi-linker
index 3542ffd..14a4de4 100755
--- a/darwin-x86/bin/header-abi-linker
+++ b/darwin-x86/bin/header-abi-linker
Binary files differ
diff --git a/darwin-x86/bin/proto_metadata_plugin b/darwin-x86/bin/proto_metadata_plugin
index 3248ed9..2326396 100755
--- a/darwin-x86/bin/proto_metadata_plugin
+++ b/darwin-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/darwin-x86/bin/protoc_extractor b/darwin-x86/bin/protoc_extractor
index 9197bef..10b6f60 100755
--- a/darwin-x86/bin/protoc_extractor
+++ b/darwin-x86/bin/protoc_extractor
Binary files differ
diff --git a/darwin-x86/bin/versioner b/darwin-x86/bin/versioner
index 88ee799..6d45046 100755
--- a/darwin-x86/bin/versioner
+++ b/darwin-x86/bin/versioner
Binary files differ
diff --git a/darwin-x86/clang-headers b/darwin-x86/clang-headers
index 1d9e782..e8f09e7 120000
--- a/darwin-x86/clang-headers
+++ b/darwin-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/11.0.1/include
\ No newline at end of file
+lib64/clang/11.0.5/include
\ No newline at end of file
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_complex_builtins.h b/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_complex_builtins.h
deleted file mode 100644
index 576a958..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_complex_builtins.h
+++ /dev/null
@@ -1,189 +0,0 @@
-/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_CUDA_COMPLEX_BUILTINS
-#define __CLANG_CUDA_COMPLEX_BUILTINS
-
-// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3.  These are
-// libgcc functions that clang assumes are available when compiling c99 complex
-// operations.  (These implementations come from libc++, and have been modified
-// to work with CUDA.)
-
-extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
-                                                      double __c, double __d) {
-  double __ac = __a * __c;
-  double __bd = __b * __d;
-  double __ad = __a * __d;
-  double __bc = __b * __c;
-  double _Complex z;
-  __real__(z) = __ac - __bd;
-  __imag__(z) = __ad + __bc;
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    int __recalc = 0;
-    if (std::isinf(__a) || std::isinf(__b)) {
-      __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (std::isinf(__c) || std::isinf(__d)) {
-      __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      __recalc = 1;
-    }
-    if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
-                      std::isinf(__ad) || std::isinf(__bc))) {
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (__recalc) {
-      // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
-      // a device overload (and isn't constexpr before C++11, naturally).
-      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
-    }
-  }
-  return z;
-}
-
-extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
-                                                     float __c, float __d) {
-  float __ac = __a * __c;
-  float __bd = __b * __d;
-  float __ad = __a * __d;
-  float __bc = __b * __c;
-  float _Complex z;
-  __real__(z) = __ac - __bd;
-  __imag__(z) = __ad + __bc;
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    int __recalc = 0;
-    if (std::isinf(__a) || std::isinf(__b)) {
-      __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (std::isinf(__c) || std::isinf(__d)) {
-      __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      __recalc = 1;
-    }
-    if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
-                      std::isinf(__ad) || std::isinf(__bc))) {
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (__recalc) {
-      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
-    }
-  }
-  return z;
-}
-
-extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
-                                                      double __c, double __d) {
-  int __ilogbw = 0;
-  // Can't use std::max, because that's defined in <algorithm>, and we don't
-  // want to pull that in for every compile.  The CUDA headers define
-  // ::max(float, float) and ::max(double, double), which is sufficient for us.
-  double __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
-  if (std::isfinite(__logbw)) {
-    __ilogbw = (int)__logbw;
-    __c = std::scalbn(__c, -__ilogbw);
-    __d = std::scalbn(__d, -__ilogbw);
-  }
-  double __denom = __c * __c + __d * __d;
-  double _Complex z;
-  __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
-  __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    if ((__denom == 0.0) && (!std::isnan(__a) || !std::isnan(__b))) {
-      __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
-      __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
-    } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
-               std::isfinite(__d)) {
-      __a = std::copysign(std::isinf(__a) ? 1.0 : 0.0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1.0 : 0.0, __b);
-      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
-    } else if (std::isinf(__logbw) && __logbw > 0.0 && std::isfinite(__a) &&
-               std::isfinite(__b)) {
-      __c = std::copysign(std::isinf(__c) ? 1.0 : 0.0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1.0 : 0.0, __d);
-      __real__(z) = 0.0 * (__a * __c + __b * __d);
-      __imag__(z) = 0.0 * (__b * __c - __a * __d);
-    }
-  }
-  return z;
-}
-
-extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
-                                                     float __c, float __d) {
-  int __ilogbw = 0;
-  float __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
-  if (std::isfinite(__logbw)) {
-    __ilogbw = (int)__logbw;
-    __c = std::scalbn(__c, -__ilogbw);
-    __d = std::scalbn(__d, -__ilogbw);
-  }
-  float __denom = __c * __c + __d * __d;
-  float _Complex z;
-  __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
-  __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    if ((__denom == 0) && (!std::isnan(__a) || !std::isnan(__b))) {
-      __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
-      __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
-    } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
-               std::isfinite(__d)) {
-      __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
-      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
-    } else if (std::isinf(__logbw) && __logbw > 0 && std::isfinite(__a) &&
-               std::isfinite(__b)) {
-      __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
-      __real__(z) = 0 * (__a * __c + __b * __d);
-      __imag__(z) = 0 * (__b * __c - __a * __d);
-    }
-  }
-  return z;
-}
-
-#endif // __CLANG_CUDA_COMPLEX_BUILTINS
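
Note: the header deleted above ports the libgcc/compiler-rt complex helpers to CUDA device code. As a minimal host-side sketch of the same C99 Annex G semantics (hypothetical, not part of this patch; assumes a C11 <complex.h> that provides CMPLX), the program below shows the two behaviours those helpers exist to guarantee: __muldc3's recovery of an infinite product whose textbook formula is (NaN, NaN), and __divdc3's logb/scalbn prescaling, which keeps a quotient representable where the naive denominator c*c + d*d would overflow.

/* Hypothetical host-side illustration; on a conforming toolchain these
 * operations are lowered to the host's own __muldc3/__divdc3, which follow
 * the same recovery paths as the deleted device header above. */
#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  /* Annex G recovery in __muldc3: the textbook formula for this product is
   * (NaN, NaN), but infinity times a nonzero value must stay infinite; the
   * __recalc path rescales the infinite operand and returns (-inf, +inf). */
  double _Complex a = CMPLX(INFINITY, INFINITY);
  double _Complex b = CMPLX(0.0, 1.0);
  double _Complex p = a * b;
  printf("mul: (%g, %g)\n", creal(p), cimag(p));

  /* logb/scalbn prescaling in __divdc3: here c*c + d*d overflows to
   * infinity, yet scaling c and d by 2^-ilogbw first recovers the tiny
   * but representable quotient, roughly (5e-309, -5e-309). */
  double _Complex one = CMPLX(1.0, 0.0);
  double _Complex z = CMPLX(1e308, 1e308);
  double _Complex q = one / z;
  printf("div: (%g, %g)\n", creal(q), cimag(q));
  return 0;
}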
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_mve.h b/darwin-x86/lib64/clang/11.0.1/include/arm_mve.h
deleted file mode 100644
index 8a4b58e..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/arm_mve.h
+++ /dev/null
@@ -1,13787 +0,0 @@
-/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===
- *
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_MVE_H
-#define __ARM_MVE_H
-
-#if !__ARM_FEATURE_MVE
-#error "MVE support not enabled"
-#endif
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint16_t mve_pred16_t;
-typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
-typedef struct { int16x8_t val[2]; } int16x8x2_t;
-typedef struct { int16x8_t val[4]; } int16x8x4_t;
-typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
-typedef struct { int32x4_t val[2]; } int32x4x2_t;
-typedef struct { int32x4_t val[4]; } int32x4x4_t;
-typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
-typedef struct { int64x2_t val[2]; } int64x2x2_t;
-typedef struct { int64x2_t val[4]; } int64x2x4_t;
-typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
-typedef struct { int8x16_t val[2]; } int8x16x2_t;
-typedef struct { int8x16_t val[4]; } int8x16x4_t;
-typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
-typedef struct { uint16x8_t val[2]; } uint16x8x2_t;
-typedef struct { uint16x8_t val[4]; } uint16x8x4_t;
-typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
-typedef struct { uint32x4_t val[2]; } uint32x4x2_t;
-typedef struct { uint32x4_t val[4]; } uint32x4x4_t;
-typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
-typedef struct { uint64x2_t val[2]; } uint64x2x2_t;
-typedef struct { uint64x2_t val[4]; } uint64x2x4_t;
-typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;
-typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
-typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
-int64_t __arm_asrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
-uint64_t __arm_lsll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
-int32_t __arm_sqrshr(int32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
-int64_t __arm_sqrshrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
-int64_t __arm_sqrshrl_sat48(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
-int32_t __arm_sqshl(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
-int64_t __arm_sqshll(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
-int32_t __arm_srshr(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
-int64_t __arm_srshrl(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
-uint32_t __arm_uqrshl(uint32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
-uint64_t __arm_uqrshll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
-uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
-uint32_t __arm_uqshl(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
-uint64_t __arm_uqshll(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
-uint32_t __arm_urshr(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
-uint64_t __arm_urshrl(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t __arm_vabdq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t __arm_vabdq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t __arm_vabdq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t __arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t __arm_vandq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t __arm_vandq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t __arm_vandq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t __arm_vandq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t __arm_vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t __arm_vbicq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t __arm_vbicq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t __arm_vbicq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t __arm_vbicq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t __arm_vbicq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t __arm_vbicq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t __arm_vbicq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t __arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
-int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
-int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
-int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
-int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
-uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
-uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
-uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
-uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q)))
-mve_pred16_t __arm_vctp16q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q_m)))
-mve_pred16_t __arm_vctp16q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q)))
-mve_pred16_t __arm_vctp32q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q_m)))
-mve_pred16_t __arm_vctp32q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q)))
-mve_pred16_t __arm_vctp64q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q_m)))
-mve_pred16_t __arm_vctp64q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q)))
-mve_pred16_t __arm_vctp8q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q_m)))
-mve_pred16_t __arm_vctp8q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t __arm_vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t __arm_vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t __arm_vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t __arm_vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t __arm_vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t __arm_vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t __arm_vddupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t __arm_vddupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t __arm_vddupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t __arm_vddupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t __arm_vddupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t __arm_vddupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t __arm_vddupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t __arm_vddupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t __arm_vddupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t __arm_vddupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t __arm_vddupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t __arm_vddupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t __arm_vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t __arm_vddupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t __arm_vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t __arm_vddupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t __arm_vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t __arm_vddupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t __arm_vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t __arm_vddupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t __arm_vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t __arm_vddupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t __arm_vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t __arm_vddupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t __arm_vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t __arm_vdupq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t __arm_vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t __arm_vdupq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t __arm_vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t __arm_vdupq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t __arm_vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t __arm_vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t __arm_vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t __arm_vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t __arm_vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t __arm_vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s16)))
-int16x8_t __arm_vdupq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s32)))
-int32x4_t __arm_vdupq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s8)))
-int8x16_t __arm_vdupq_n_s8(int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u16)))
-uint16x8_t __arm_vdupq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u32)))
-uint32x4_t __arm_vdupq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u8)))
-uint8x16_t __arm_vdupq_n_u8(uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s16)))
-int16x8_t __arm_vdupq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s32)))
-int32x4_t __arm_vdupq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s8)))
-int8x16_t __arm_vdupq_x_n_s8(int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u16)))
-uint16x8_t __arm_vdupq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u32)))
-uint32x4_t __arm_vdupq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u8)))
-uint8x16_t __arm_vdupq_x_n_u8(uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t __arm_vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t __arm_vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t __arm_vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t __arm_vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t __arm_vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t __arm_vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t __arm_vdwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t __arm_vdwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t __arm_vdwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t __arm_vdwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t __arm_vdwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t __arm_vdwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t __arm_vdwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t __arm_vdwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t __arm_vdwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t __arm_vdwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t __arm_vdwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t __arm_vdwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t __arm_vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t __arm_vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t __arm_vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t __arm_vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t __arm_vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t __arm_vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t __arm_vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t __arm_vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t __arm_vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t __arm_vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t __arm_vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t __arm_vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t __arm_veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t __arm_veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t __arm_veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t __arm_veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t __arm_veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t __arm_veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t __arm_veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t __arm_veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t __arm_veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t __arm_veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t __arm_veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t __arm_veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t __arm_veorq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t __arm_veorq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t __arm_veorq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t __arm_veorq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t __arm_veorq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t __arm_veorq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t __arm_veorq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t __arm_veorq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t __arm_veorq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t __arm_veorq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t __arm_veorq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t __arm_veorq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t __arm_veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t __arm_veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t __arm_veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t __arm_veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t __arm_veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t __arm_veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t __arm_veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t __arm_veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t __arm_veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t __arm_veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t __arm_veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t __arm_veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t __arm_vgetq_lane_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t __arm_vgetq_lane(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t __arm_vgetq_lane_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t __arm_vgetq_lane(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t __arm_vgetq_lane_s64(int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t __arm_vgetq_lane(int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t __arm_vgetq_lane_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t __arm_vgetq_lane(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t __arm_vgetq_lane(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t __arm_vgetq_lane(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t __arm_vgetq_lane(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t __arm_vgetq_lane(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t __arm_vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t __arm_vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t __arm_vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t __arm_vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t __arm_vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t __arm_vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t __arm_vhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t __arm_vhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t __arm_vhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t __arm_vhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t __arm_vhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t __arm_vhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t __arm_vhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t __arm_vhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t __arm_vhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t __arm_vhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t __arm_vhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t __arm_vhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t __arm_vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t __arm_vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t __arm_vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t __arm_vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t __arm_vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t __arm_vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t __arm_vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t __arm_vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t __arm_vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t __arm_vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t __arm_vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t __arm_vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t __arm_vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t __arm_vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t __arm_vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t __arm_vhcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t __arm_vhcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t __arm_vhcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t __arm_vhcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t __arm_vhcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t __arm_vhcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t __arm_vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t __arm_vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t __arm_vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t __arm_vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t __arm_vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t __arm_vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t __arm_vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t __arm_vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t __arm_vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t __arm_vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t __arm_vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t __arm_vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t __arm_vhcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t __arm_vhcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t __arm_vhcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t __arm_vhcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t __arm_vhcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t __arm_vhcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t __arm_vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t __arm_vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t __arm_vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t __arm_vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t __arm_vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t __arm_vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t __arm_vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t __arm_vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t __arm_vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t __arm_vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t __arm_vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t __arm_vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t __arm_vhsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t __arm_vhsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t __arm_vhsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t __arm_vhsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t __arm_vhsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t __arm_vhsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t __arm_vhsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t __arm_vhsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t __arm_vhsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t __arm_vhsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t __arm_vhsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t __arm_vhsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t __arm_vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t __arm_vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t __arm_vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t __arm_vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t __arm_vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t __arm_vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t __arm_vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t __arm_vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t __arm_vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t __arm_vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t __arm_vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t __arm_vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t __arm_vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t __arm_vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t __arm_vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t __arm_vidupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t __arm_vidupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t __arm_vidupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t __arm_vidupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t __arm_vidupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t __arm_vidupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t __arm_vidupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t __arm_vidupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t __arm_vidupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t __arm_vidupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t __arm_vidupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t __arm_vidupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t __arm_vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t __arm_vidupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t __arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t __arm_viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t __arm_vld1q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t __arm_vld1q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t __arm_vld1q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t __arm_vld1q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t __arm_vld1q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t __arm_vld1q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t __arm_vld1q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t __arm_vld1q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t __arm_vld1q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t __arm_vld1q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t __arm_vld1q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t __arm_vld1q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t __arm_vld2q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t __arm_vld2q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t __arm_vld2q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t __arm_vld2q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t __arm_vld2q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t __arm_vld2q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t __arm_vld2q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t __arm_vld2q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t __arm_vld2q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t __arm_vld2q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t __arm_vld2q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t __arm_vld2q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t __arm_vld4q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t __arm_vld4q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t __arm_vld4q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t __arm_vld4q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t __arm_vld4q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t __arm_vld4q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t __arm_vld4q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t __arm_vld4q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t __arm_vld4q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t __arm_vld4q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t __arm_vld4q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t __arm_vld4q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
-int16x8_t __arm_vldrbq_s16(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
-int32x4_t __arm_vldrbq_s32(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
-int8x16_t __arm_vldrbq_s8(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
-uint16x8_t __arm_vldrbq_u16(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
-uint32x4_t __arm_vldrbq_u32(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
-uint8x16_t __arm_vldrbq_u8(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
-int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
-int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
-int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
-uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
-uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
-uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
-int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
-uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
-int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
-uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
-int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
-uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
-int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
-uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
-int16x8_t __arm_vldrhq_s16(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
-int32x4_t __arm_vldrhq_s32(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
-uint16x8_t __arm_vldrhq_u16(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
-uint32x4_t __arm_vldrhq_u32(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
-int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
-int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
-uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
-uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
-int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
-uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
-int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
-uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
-int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
-uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
-int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
-uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
-int32x4_t __arm_vldrwq_s32(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
-uint32x4_t __arm_vldrwq_u32(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
-int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
-uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t __arm_vmaxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t __arm_vmaxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t __arm_vmaxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t __arm_vmaxvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t __arm_vmaxvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t __arm_vmaxvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t __arm_vminq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t __arm_vminq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t __arm_vminq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t __arm_vminq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t __arm_vminvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t __arm_vminvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t __arm_vminvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t __arm_vminvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t __arm_vminvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t __arm_vminvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t __arm_vminvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t __arm_vminvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t __arm_vminvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t __arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t __arm_vmladavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t __arm_vmladavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t __arm_vmladavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t __arm_vmladavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t __arm_vmladavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t __arm_vmladavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t __arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t __arm_vmlaldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t __arm_vmlaldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t __arm_vmlaldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t __arm_vmlsdavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t __arm_vmlsdavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t __arm_vmlsdavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t __arm_vmlsdavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t __arm_vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t __arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t __arm_vmlsldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t __arm_vmlsldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t __arm_vmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t __arm_vmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t __arm_vmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t __arm_vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t __arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t __arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t __arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t __arm_vmulq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t __arm_vmulq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t __arm_vmulq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t __arm_vmulq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s16)))
-int16x8_t __arm_vmvnq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s32)))
-int32x4_t __arm_vmvnq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u16)))
-uint16x8_t __arm_vmvnq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u32)))
-uint32x4_t __arm_vmvnq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
-int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
-int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
-uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
-uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t __arm_vornq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t __arm_vornq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t __arm_vornq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t __arm_vorrq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t __arm_vorrq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t __arm_vorrq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t __arm_vorrq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t __arm_vorrq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t __arm_vorrq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t __arm_vorrq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t __arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t __arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpnot)))
-mve_pred16_t __arm_vpnot(mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t __arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t __arm_vqaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t __arm_vqaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t __arm_vqaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t __arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t __arm_vqrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t __arm_vqrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t __arm_vqrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t __arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t __arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t __arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t __arm_vqshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t __arm_vqshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t __arm_vqshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t __arm_vqshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t __arm_vqshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t __arm_vqshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t __arm_vqshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t __arm_vqshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t __arm_vqshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t __arm_vqshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t __arm_vqshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t __arm_vqshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t __arm_vqshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t __arm_vqshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t __arm_vqshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t __arm_vqshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t __arm_vqshluq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t __arm_vqshluq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t __arm_vqshluq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t __arm_vqsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t __arm_vqsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t __arm_vqsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t __arm_vqsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t __arm_vreinterpretq_s16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t __arm_vreinterpretq_s16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t __arm_vreinterpretq_s16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t __arm_vreinterpretq_s32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t __arm_vreinterpretq_s32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t __arm_vreinterpretq_s32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t __arm_vreinterpretq_s64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t __arm_vreinterpretq_s64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t __arm_vreinterpretq_s64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t __arm_vreinterpretq_s8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t __arm_vreinterpretq_s8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t __arm_vreinterpretq_s8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t __arm_vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t __arm_vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t __arm_vrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t __arm_vrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t __arm_vrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t __arm_vrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t __arm_vrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t __arm_vrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t __arm_vrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t __arm_vrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t __arm_vrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t __arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t __arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t __arm_vrshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t __arm_vrshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t __arm_vrshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t __arm_vrshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t __arm_vrshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t __arm_vrshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t __arm_vrshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t __arm_vrshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t __arm_vrshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t __arm_vrshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t __arm_vshllbq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t __arm_vshllbq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t __arm_vshllbq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t __arm_vshllbq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t __arm_vshllbq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t __arm_vshllbq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t __arm_vshllbq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t __arm_vshlltq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t __arm_vshlltq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t __arm_vshlltq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t __arm_vshlltq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t __arm_vshlltq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t __arm_vshlltq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t __arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t __arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t __arm_vshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t __arm_vshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t __arm_vshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t __arm_vshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t __arm_vshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t __arm_vshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t __arm_vshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t __arm_vshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t __arm_vshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t __arm_vshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t __arm_vshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t __arm_vshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t __arm_vshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t __arm_vshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t __arm_vshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t __arm_vshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t __arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t __arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t __arm_vshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t __arm_vshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t __arm_vshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t __arm_vshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t __arm_vshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t __arm_vshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t __arm_vshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t __arm_vshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t __arm_vshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t __arm_vsliq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t __arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t __arm_vsriq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void __arm_vst1q_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void __arm_vst1q(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void __arm_vst1q_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void __arm_vst1q(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void __arm_vst1q_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void __arm_vst1q(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void __arm_vst1q_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void __arm_vst1q(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void __arm_vst1q_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void __arm_vst1q(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void __arm_vst1q_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void __arm_vst1q(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void __arm_vst2q_s16(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void __arm_vst2q(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void __arm_vst2q_s32(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void __arm_vst2q(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void __arm_vst2q_s8(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void __arm_vst2q(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void __arm_vst2q(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void __arm_vst2q(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void __arm_vst2q(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void __arm_vst4q_s16(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void __arm_vst4q(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void __arm_vst4q_s32(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void __arm_vst4q(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void __arm_vst4q_s8(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void __arm_vst4q(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void __arm_vst4q(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void __arm_vst4q(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void __arm_vst4q(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void __arm_vstrbq_s16(int8_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void __arm_vstrbq(int8_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void __arm_vstrbq_s32(int8_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void __arm_vstrbq(int8_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void __arm_vstrbq_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void __arm_vstrbq(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void __arm_vstrbq(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void __arm_vstrbq(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void __arm_vstrbq(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void __arm_vstrhq_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void __arm_vstrhq(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void __arm_vstrhq_s32(int16_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void __arm_vstrhq(int16_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void __arm_vstrhq(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void __arm_vstrhq(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void __arm_vstrwq_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void __arm_vstrwq(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void __arm_vstrwq(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t __arm_vsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t __arm_vsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t __arm_vsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
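Each intrinsic above is declared twice: once under its type-suffixed name and once as an `overloadable` alias of the same builtin, which is how this header lets callers write the short polymorphic spelling and have clang resolve the suffix from the argument types. The `_m` variants take an extra `inactive` vector supplying the lanes where the predicate is false, the `_x` variants leave those lanes unspecified, and `_p` marks a predicated store. A minimal sketch of the naming scheme, assuming an MVE-enabled target (e.g. `-mcpu=cortex-m55`) and caller-provided buffers; the function name is illustrative only:

#include <arm_mve.h>

/* Illustrative only: each polymorphic call resolves to one of the
 * suffixed declarations above (__arm_vsubq_x -> __arm_vsubq_x_s16, etc.). */
void sub_store(const int16_t *a, const int16_t *b, int16_t *out,
               mve_pred16_t p) {
    int16x8_t va = __arm_vld1q(a);            /* __arm_vld1q_s16 */
    int16x8_t vb = __arm_vld1q(b);
    int16x8_t d  = __arm_vsubq_x(va, vb, p);  /* __arm_vsubq_x_s16 */
    __arm_vstrhq_p(out, d, p);                /* predicated halfword store */
}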
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
-int16x8_t __arm_vuninitializedq(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
-int32x4_t __arm_vuninitializedq(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
-int64x2_t __arm_vuninitializedq(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
-int8x16_t __arm_vuninitializedq(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
-uint16x8_t __arm_vuninitializedq(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
-uint32x4_t __arm_vuninitializedq(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
-uint64x2_t __arm_vuninitializedq(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
-uint8x16_t __arm_vuninitializedq(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
-int16x8_t __arm_vuninitializedq_s16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
-int32x4_t __arm_vuninitializedq_s32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
-int64x2_t __arm_vuninitializedq_s64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
-int8x16_t __arm_vuninitializedq_s8();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
-uint16x8_t __arm_vuninitializedq_u16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
-uint32x4_t __arm_vuninitializedq_u32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
-uint64x2_t __arm_vuninitializedq_u64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
-uint8x16_t __arm_vuninitializedq_u8();
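The `vuninitializedq` family returns a vector with unspecified contents; the argument to the polymorphic form is never read and serves only to select the result type. One use is as the `inactive` operand of an `_m` intrinsic when the false-predicated lanes will never be consumed. A short sketch under that assumption (the helper name is hypothetical, not part of the header):

/* merge_sub is an illustrative helper, not a header declaration. */
int16x8_t merge_sub(int16x8_t a, int16x8_t b, mve_pred16_t p) {
    int16x8_t inactive = __arm_vuninitializedq_s16();  /* contents unspecified */
    return __arm_vsubq_m(inactive, a, b, p);           /* __arm_vsubq_m_s16 */
}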
-
-#if (__ARM_FEATURE_MVE & 2)
-
-typedef __fp16 float16_t;
-typedef float float32_t;
-typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
-typedef struct { float16x8_t val[2]; } float16x8x2_t;
-typedef struct { float16x8_t val[4]; } float16x8x4_t;
-typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;
-typedef struct { float32x4_t val[2]; } float32x4x2_t;
-typedef struct { float32x4_t val[4]; } float32x4x4_t;
-
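Everything from here on is gated on bit 1 of `__ARM_FEATURE_MVE`: bit 0 advertises integer MVE, bit 1 the floating-point extension, so `float16x8_t`/`float32x4_t` and the intrinsics that follow only exist on MVE-FP targets. An illustrative guard, assuming nothing beyond the macro this header already tests:

#include <arm_mve.h>

#if defined(__ARM_FEATURE_MVE) && (__ARM_FEATURE_MVE & 2)
/* __arm_vabdq_x_f32 is declared below; as with the integer _x forms,
 * false-predicated lanes of the result are unspecified. */
float32x4_t abs_diff(float32x4_t a, float32x4_t b, mve_pred16_t p) {
    return __arm_vabdq_x(a, b, p);  /* resolves to __arm_vabdq_x_f32 */
}
#endif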
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t __arm_vabdq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t __arm_vabdq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t __arm_vaddq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t __arm_vaddq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t __arm_vandq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t __arm_vandq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t __arm_vbicq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t __arm_vbicq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t __arm_vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t __arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t __arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t __arm_vcmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t __arm_vcmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t __arm_vcmulq_rot180_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t __arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
-float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
-float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
-float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
-float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t __arm_vcvtq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t __arm_vcvtq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t __arm_vcvtq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t __arm_vcvtq_n_f32_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t __arm_vcvtq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
-int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
-int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
-uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
-uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t __arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
-int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
-int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
-uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
-uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
-float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
-float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f16)))
-float16x8_t __arm_vdupq_n_f16(float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f32)))
-float32x4_t __arm_vdupq_n_f32(float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f16)))
-float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f32)))
-float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t __arm_veorq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t __arm_veorq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t __arm_vgetq_lane_f16(float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t __arm_vgetq_lane(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t __arm_vgetq_lane_f32(float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t __arm_vgetq_lane(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t __arm_vld1q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t __arm_vld1q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t __arm_vld1q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t __arm_vld1q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t __arm_vld2q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t __arm_vld2q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t __arm_vld2q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t __arm_vld2q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t __arm_vld4q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t __arm_vld4q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t __arm_vld4q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t __arm_vld4q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
-float16x8_t __arm_vldrhq_f16(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
-float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
-float32x4_t __arm_vldrwq_f32(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
-float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
-float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
-float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
-float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
-float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t __arm_vmaxnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t __arm_vminnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t __arm_vminnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t __arm_vmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t __arm_vmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t __arm_vornq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t __arm_vornq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t __arm_vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t __arm_vorrq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t __arm_vorrq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t __arm_vreinterpretq_f16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t __arm_vreinterpretq_f16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t __arm_vreinterpretq_f16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t __arm_vreinterpretq_f16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t __arm_vreinterpretq_f16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t __arm_vreinterpretq_f16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t __arm_vreinterpretq_f16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t __arm_vreinterpretq_f16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t __arm_vreinterpretq_f16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t __arm_vreinterpretq_f32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t __arm_vreinterpretq_f32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t __arm_vreinterpretq_f32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t __arm_vreinterpretq_f32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t __arm_vreinterpretq_f32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t __arm_vreinterpretq_f32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t __arm_vreinterpretq_f32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t __arm_vreinterpretq_f32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t __arm_vreinterpretq_f32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t __arm_vreinterpretq_s16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t __arm_vreinterpretq_s16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t __arm_vreinterpretq_s32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t __arm_vreinterpretq_s32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t __arm_vreinterpretq_s64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t __arm_vreinterpretq_s64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t __arm_vreinterpretq_s8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t __arm_vreinterpretq_s8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t __arm_vreinterpretq_u16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t __arm_vreinterpretq_u16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t __arm_vreinterpretq_u32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t __arm_vreinterpretq_u32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t __arm_vreinterpretq_u64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t __arm_vreinterpretq_u64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void __arm_vst1q_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void __arm_vst1q(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void __arm_vst1q_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void __arm_vst1q(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void __arm_vst2q_f16(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void __arm_vst2q(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void __arm_vst2q_f32(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void __arm_vst2q(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void __arm_vst4q_f16(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void __arm_vst4q(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void __arm_vst4q_f32(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void __arm_vst4q(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void __arm_vstrhq_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void __arm_vstrhq(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void __arm_vstrwq_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void __arm_vstrwq(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t __arm_vsubq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t __arm_vsubq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
-float16x8_t __arm_vuninitializedq_f16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
-float32x4_t __arm_vuninitializedq_f32();
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
-float16x8_t __arm_vuninitializedq(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
-float32x4_t __arm_vuninitializedq(float32x4_t);
-
-#endif /* (__ARM_FEATURE_MVE & 2) */
-
-#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
-int64_t asrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
-uint64_t lsll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
-int32_t sqrshr(int32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
-int64_t sqrshrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
-int64_t sqrshrl_sat48(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
-int32_t sqshl(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
-int64_t sqshll(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
-int32_t srshr(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
-int64_t srshrl(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
-uint32_t uqrshl(uint32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
-uint64_t uqrshll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
-uint64_t uqrshll_sat48(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
-uint32_t uqshl(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
-uint64_t uqshll(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
-uint32_t urshr(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
-uint64_t urshrl(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t vabavq(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t vabavq_s32(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t vabavq(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t vabavq(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t vabdq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t vabdq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t vabdq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t vabdq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t vabdq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t vabdq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t vabdq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t vabdq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t vabdq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t vaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t vaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t vaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t vaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t vaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t vaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t vaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t vaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t vaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t vandq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t vandq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t vandq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t vandq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t vandq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t vandq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t vandq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t vandq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t vandq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t vandq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t vandq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t vandq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t vbicq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t vbicq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t vbicq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t vbicq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t vbicq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t vbicq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t vbicq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t vbicq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t vbicq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t vbicq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t vbicq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t vbicq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t vbicq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t vbicq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t vbicq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t vcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t vcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t vcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t vcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t vcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t vcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t vcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t vcmpeqq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t vcmpeqq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t vcmpeqq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t vcmpgeq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t vcmpgeq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t vcmpgeq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t vcmpgtq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t vcmpgtq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t vcmpgtq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t vcmpleq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t vcmpleq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t vcmpleq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t vcmpltq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t vcmpltq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t vcmpltq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t vcmpneq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t vcmpneq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t vcmpneq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
-int16x8_t vcreateq_s16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
-int32x4_t vcreateq_s32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
-int64x2_t vcreateq_s64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
-int8x16_t vcreateq_s8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
-uint16x8_t vcreateq_u16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
-uint32x4_t vcreateq_u32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
-uint64x2_t vcreateq_u64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
-uint8x16_t vcreateq_u8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q)))
-mve_pred16_t vctp16q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q_m)))
-mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q)))
-mve_pred16_t vctp32q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q_m)))
-mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q)))
-mve_pred16_t vctp64q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q_m)))
-mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q)))
-mve_pred16_t vctp8q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q_m)))
-mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t vddupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t vddupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t vddupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t vddupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t vddupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t vddupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t vddupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t vddupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t vddupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t vddupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t vddupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t vddupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t vddupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s16)))
-int16x8_t vdupq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s32)))
-int32x4_t vdupq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s8)))
-int8x16_t vdupq_n_s8(int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u16)))
-uint16x8_t vdupq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u32)))
-uint32x4_t vdupq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u8)))
-uint8x16_t vdupq_n_u8(uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s16)))
-int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s32)))
-int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s8)))
-int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u16)))
-uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u32)))
-uint32x4_t vdupq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u8)))
-uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t veorq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t veorq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t veorq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t veorq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t veorq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t veorq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t veorq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t veorq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t veorq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t veorq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t veorq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t veorq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t vgetq_lane_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t vgetq_lane(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t vgetq_lane_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t vgetq_lane(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t vgetq_lane_s64(int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t vgetq_lane(int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t vgetq_lane_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t vgetq_lane(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t vgetq_lane_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t vgetq_lane(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t vgetq_lane_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t vgetq_lane(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t vgetq_lane_u64(uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t vgetq_lane(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t vgetq_lane_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t vgetq_lane(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t vhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t vhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t vhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t vhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t vhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t vhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t vhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t vhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t vhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t vhcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t vhsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t vhsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t vhsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t vhsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t vhsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t vhsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t vhsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t vhsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t vhsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t vidupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t vidupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t vidupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t vidupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t vidupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t vidupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t vidupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t vidupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t vidupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t vidupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t vidupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t vidupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t vidupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t viwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t viwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t viwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t viwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t vld1q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t vld1q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t vld1q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t vld1q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t vld1q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t vld1q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t vld1q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t vld1q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t vld1q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t vld1q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t vld1q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t vld1q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t vld2q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t vld2q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t vld2q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t vld2q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t vld2q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t vld2q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t vld2q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t vld2q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t vld2q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t vld2q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t vld2q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t vld2q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t vld4q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t vld4q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t vld4q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t vld4q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t vld4q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t vld4q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t vld4q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t vld4q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t vld4q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t vld4q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t vld4q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t vld4q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
-int16x8_t vldrbq_s16(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
-int32x4_t vldrbq_s32(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
-int8x16_t vldrbq_s8(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
-uint16x8_t vldrbq_u16(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
-uint32x4_t vldrbq_u32(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
-uint8x16_t vldrbq_u8(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
-int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
-int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
-int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
-uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
-uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
-uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
-int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
-uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
-int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
-uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
-int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
-uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
-int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
-uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
-int16x8_t vldrhq_s16(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
-int32x4_t vldrhq_s32(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
-uint16x8_t vldrhq_u16(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
-uint32x4_t vldrhq_u32(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
-int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
-int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
-uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
-uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
-int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
-uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
-int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
-uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
-int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
-uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
-int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
-uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
-int32x4_t vldrwq_s32(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
-uint32x4_t vldrwq_u32(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
-int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
-uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t vmaxaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t vmaxaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t vmaxaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t vmaxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t vmaxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t vmaxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t vmaxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t vmaxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t vmaxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t vmaxq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t vmaxq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t vmaxq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t vmaxvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t vmaxvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t vmaxvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t vmaxvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t vmaxvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t vmaxvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t vmaxvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t vmaxvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t vmaxvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t vmaxvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t vmaxvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t vmaxvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t vminaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t vminaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t vminaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t vminaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t vminaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t vminaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t vminq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t vminq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t vminq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t vminq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t vminq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t vminq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t vminq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t vminq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t vminq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t vminq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t vminq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t vminq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t vminvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t vminvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t vminvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t vminvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t vminvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t vminvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t vminvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t vminvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t vminvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t vminvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t vminvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t vminvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t vmladavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t vmladavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t vmladavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t vmladavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t vmladavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t vmladavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t vmladavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t vmladavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t vmladavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t vmladavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t vmladavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t vmladavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t vmladavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t vmladavq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t vmladavq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t vmladavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t vmladavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t vmladavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t vmladavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t vmladavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t vmladavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t vmlaldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t vmlaldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t vmlaldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t vmlaldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t vmlaldavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t vmlaldavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t vmlaldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t vmlaldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t vmlaldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t vmlaldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t vmlsdavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t vmlsdavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t vmlsdavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t vmlsdavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t vmlsdavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t vmlsdavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t vmlsdavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t vmlsdavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t vmlsdavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t vmlsdavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t vmlsdavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t vmlsdavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t vmlsldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t vmlsldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t vmlsldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t vmlsldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t vmlsldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t vmlsldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t vmlsldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t vmlsldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t vmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t vmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t vmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t vmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t vmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t vmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t vmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t vmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t vmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t vmullbq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t vmullbq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t vmullbq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t vmulltq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t vmulltq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t vmulltq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t vmulltq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t vmulq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t vmulq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t vmulq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t vmulq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t vmulq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t vmulq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t vmulq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t vmulq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t vmulq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s16)))
-int16x8_t vmvnq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s32)))
-int32x4_t vmvnq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u16)))
-uint16x8_t vmvnq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u32)))
-uint32x4_t vmvnq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
-int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
-int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
-uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
-uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t vornq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t vornq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t vornq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t vornq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t vornq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t vornq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t vornq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t vornq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t vornq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t vornq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t vornq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t vornq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t vorrq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t vorrq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t vorrq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t vorrq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t vorrq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t vorrq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t vorrq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t vorrq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t vorrq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t vorrq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t vorrq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t vorrq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t vorrq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t vorrq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t vorrq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpnot)))
-mve_pred16_t vpnot(mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t vqaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t vqaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t vqaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t vqaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t vqaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t vqaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t vqaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t vqaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t vqaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t vqdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t vqdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t vqdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t vqrdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t vqrdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t vqrdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t vqrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t vqrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t vqrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t vqrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t vqrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t vqrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t vqrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t vqrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t vqrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t vqrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t vqrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t vqrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t vqrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t vqrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t vqrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t vqrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t vqrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t vqrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t vqrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t vqrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t vqshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t vqshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t vqshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t vqshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t vqshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t vqshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t vqshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t vqshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t vqshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t vqshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t vqshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t vqshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t vqshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t vqshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t vqshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t vqshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t vqshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t vqshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t vqshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t vqshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t vqshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t vqshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t vqshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t vqshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t vqshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t vqshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t vqshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t vqshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t vqshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t vqshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t vqshluq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t vqshluq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t vqshluq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t vqshluq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t vqshluq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t vqshluq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t vqshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t vqshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t vqshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t vqshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t vqsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t vqsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t vqsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t vqsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t vqsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t vqsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t vqsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t vqsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t vqsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t vreinterpretq_s16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t vreinterpretq_s16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t vreinterpretq_s16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t vreinterpretq_s16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t vreinterpretq_s16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t vreinterpretq_s16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t vreinterpretq_s16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t vreinterpretq_s16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t vreinterpretq_s16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t vreinterpretq_s16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t vreinterpretq_s16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t vreinterpretq_s16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t vreinterpretq_s16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t vreinterpretq_s16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t vreinterpretq_s32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t vreinterpretq_s32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t vreinterpretq_s32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t vreinterpretq_s32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t vreinterpretq_s32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t vreinterpretq_s32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t vreinterpretq_s32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t vreinterpretq_s32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t vreinterpretq_s32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t vreinterpretq_s32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t vreinterpretq_s32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t vreinterpretq_s32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t vreinterpretq_s32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t vreinterpretq_s32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t vreinterpretq_s64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t vreinterpretq_s64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t vreinterpretq_s64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t vreinterpretq_s64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t vreinterpretq_s64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t vreinterpretq_s64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t vreinterpretq_s64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t vreinterpretq_s64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t vreinterpretq_s64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t vreinterpretq_s64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t vreinterpretq_s64_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t vreinterpretq_s64(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t vreinterpretq_s64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t vreinterpretq_s64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t vreinterpretq_s8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t vreinterpretq_s8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t vreinterpretq_s8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t vreinterpretq_s8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t vreinterpretq_s8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t vreinterpretq_s8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t vreinterpretq_s8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t vreinterpretq_s8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t vreinterpretq_s8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t vreinterpretq_s8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t vreinterpretq_s8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t vreinterpretq_s8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t vreinterpretq_s8_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t vreinterpretq_s8(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t vreinterpretq_u16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t vreinterpretq_u16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t vreinterpretq_u16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t vreinterpretq_u16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t vreinterpretq_u16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t vreinterpretq_u16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t vreinterpretq_u16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t vreinterpretq_u16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t vreinterpretq_u16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t vreinterpretq_u16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t vreinterpretq_u16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t vreinterpretq_u16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t vreinterpretq_u16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t vreinterpretq_u16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t vreinterpretq_u32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t vreinterpretq_u32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t vreinterpretq_u32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t vreinterpretq_u32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t vreinterpretq_u32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t vreinterpretq_u32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t vreinterpretq_u32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t vreinterpretq_u32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t vreinterpretq_u32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t vreinterpretq_u32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t vreinterpretq_u32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t vreinterpretq_u32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t vreinterpretq_u32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t vreinterpretq_u32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t vreinterpretq_u64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t vreinterpretq_u64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t vreinterpretq_u64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t vreinterpretq_u64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t vreinterpretq_u64_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t vreinterpretq_u64(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t vreinterpretq_u64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t vreinterpretq_u64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t vreinterpretq_u64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t vreinterpretq_u64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t vreinterpretq_u64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t vreinterpretq_u64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t vreinterpretq_u64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t vreinterpretq_u64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t vreinterpretq_u8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t vreinterpretq_u8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t vreinterpretq_u8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t vreinterpretq_u8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t vreinterpretq_u8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t vreinterpretq_u8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t vreinterpretq_u8_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t vreinterpretq_u8(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t vreinterpretq_u8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t vreinterpretq_u8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t vreinterpretq_u8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t vreinterpretq_u8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t vreinterpretq_u8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t vreinterpretq_u8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t vrhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t vrhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t vrhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t vrhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t vrhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t vrhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t vrhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t vrhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t vrhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t vrmlaldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t vrmlaldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t vrmlsldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t vrmlsldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t vrmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t vrmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t vrmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t vrmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t vrmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t vrmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t vrmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t vrmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t vrmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t vrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t vrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t vrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t vrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t vrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t vrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t vrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t vrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t vrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t vrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t vrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t vrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t vrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t vrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t vrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t vrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t vrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t vrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t vrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t vrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t vrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t vrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t vrshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t vrshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t vrshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t vrshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t vrshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t vrshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t vrshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t vrshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t vrshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t vrshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t vrshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t vrshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t vsetq_lane(int16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t vsetq_lane(int32_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t vsetq_lane(int64_t, int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t vsetq_lane(int8_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t vshllbq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t vshllbq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t vshllbq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t vshllbq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t vshllbq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t vshllbq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t vshllbq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t vshllbq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t vshlltq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t vshlltq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t vshlltq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t vshlltq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t vshlltq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t vshlltq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t vshlltq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t vshlltq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t vshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t vshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t vshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t vshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t vshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t vshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t vshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t vshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t vshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t vshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t vshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t vshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t vshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t vshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t vshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t vshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t vshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t vshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t vshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t vshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t vshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t vshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t vshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t vshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t vshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t vshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t vshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t vshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t vshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t vshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t vshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t vshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t vshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t vshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t vshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t vshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t vshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t vshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t vshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t vshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t vshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t vshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t vshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t vshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t vshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t vshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t vshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t vshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t vshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t vshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t vshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t vshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t vsliq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t vsliq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t vsliq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t vsliq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t vsliq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t vsliq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t vsliq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t vsriq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t vsriq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t vsriq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t vsriq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t vsriq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t vsriq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t vsriq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void vst1q_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void vst1q(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void vst1q_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void vst1q(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void vst1q_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void vst1q(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void vst1q_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void vst1q(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void vst1q_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void vst1q(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void vst1q_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void vst1q(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void vst2q_s16(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void vst2q(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void vst2q_s32(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void vst2q(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void vst2q_s8(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void vst2q(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void vst2q_u16(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void vst2q(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void vst2q_u32(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void vst2q(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void vst2q_u8(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void vst2q(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void vst4q_s16(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void vst4q(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void vst4q_s32(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void vst4q(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void vst4q_s8(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void vst4q(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void vst4q_u16(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void vst4q(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void vst4q_u32(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void vst4q(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void vst4q_u8(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void vst4q(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void vstrbq_s16(int8_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void vstrbq(int8_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void vstrbq_s32(int8_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void vstrbq(int8_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void vstrbq_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void vstrbq(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void vstrbq_u16(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void vstrbq(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void vstrbq_u32(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void vstrbq(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void vstrbq_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void vstrbq(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void vstrhq_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void vstrhq(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void vstrhq_s32(int16_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void vstrhq(int16_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void vstrhq_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void vstrhq(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void vstrhq_u32(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void vstrhq(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void vstrwq_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void vstrwq(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void vstrwq_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void vstrwq(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t vsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t vsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t vsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t vsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t vsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t vsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t vsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t vsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t vsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
-int16x8_t vuninitializedq(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
-int32x4_t vuninitializedq(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
-int64x2_t vuninitializedq(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
-int8x16_t vuninitializedq(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
-uint16x8_t vuninitializedq(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
-uint32x4_t vuninitializedq(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
-uint64x2_t vuninitializedq(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
-uint8x16_t vuninitializedq(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
-int16x8_t vuninitializedq_s16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
-int32x4_t vuninitializedq_s32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
-int64x2_t vuninitializedq_s64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
-int8x16_t vuninitializedq_s8();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
-uint16x8_t vuninitializedq_u16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
-uint32x4_t vuninitializedq_u32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
-uint64x2_t vuninitializedq_u64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
-uint8x16_t vuninitializedq_u8();
-
-#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
-
-#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t vabdq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t vabdq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t vabdq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t vabdq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t vaddq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t vaddq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t vaddq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t vaddq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t vandq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t vandq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t vandq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t vandq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t vbicq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t vbicq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t vbicq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t vbicq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t vcaddq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t vcaddq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t vcaddq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t vcaddq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t vcaddq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t vcmpeqq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t vcmpeqq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t vcmpgeq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t vcmpgeq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t vcmpgtq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t vcmpgtq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t vcmpleq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t vcmpleq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t vcmpltq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t vcmpltq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t vcmpneq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t vcmpneq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t vcmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t vcmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t vcmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t vcmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t vcmulq_rot180(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t vcmulq_rot180(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t vcmulq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t vcmulq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t vcmulq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t vcmulq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t vcmulq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
-float16x8_t vcreateq_f16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
-float32x4_t vcreateq_f32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
-float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
-float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t vcvtq_n_f16_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t vcvtq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t vcvtq_n_f16_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t vcvtq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t vcvtq_n_f32_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t vcvtq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t vcvtq_n_f32_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t vcvtq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
-int16x8_t vcvtq_n_s16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
-int32x4_t vcvtq_n_s32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
-uint16x8_t vcvtq_n_u16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
-uint32x4_t vcvtq_n_u32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t vcvtq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
-int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
-int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
-uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
-uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
-float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
-float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f16)))
-float16x8_t vdupq_n_f16(float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f32)))
-float32x4_t vdupq_n_f32(float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f16)))
-float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f32)))
-float32x4_t vdupq_x_n_f32(float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t veorq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t veorq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t veorq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t veorq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t vgetq_lane_f16(float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t vgetq_lane(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t vgetq_lane_f32(float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t vgetq_lane(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t vld1q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t vld1q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t vld1q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t vld1q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t vld1q_z(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t vld1q_z(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t vld2q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t vld2q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t vld2q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t vld2q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t vld4q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t vld4q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t vld4q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t vld4q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
-float16x8_t vldrhq_f16(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
-float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
-float32x4_t vldrwq_f32(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
-float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
-float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
-float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
-float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
-float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t vmaxnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t vmaxnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t vmaxnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t vmaxnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t vminnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t vminnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t vminnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t vminnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t vminnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t vminnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t vminnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t vminnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t vmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t vmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t vmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t vmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t vornq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t vornq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t vornq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t vornq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t vorrq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t vorrq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t vorrq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t vorrq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t vreinterpretq_f16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t vreinterpretq_f16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t vreinterpretq_f16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t vreinterpretq_f16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t vreinterpretq_f16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t vreinterpretq_f16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t vreinterpretq_f16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t vreinterpretq_f16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t vreinterpretq_f16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t vreinterpretq_f16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t vreinterpretq_f16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t vreinterpretq_f16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t vreinterpretq_f16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t vreinterpretq_f16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t vreinterpretq_f16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t vreinterpretq_f16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t vreinterpretq_f16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t vreinterpretq_f16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t vreinterpretq_f32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t vreinterpretq_f32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t vreinterpretq_f32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t vreinterpretq_f32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t vreinterpretq_f32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t vreinterpretq_f32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t vreinterpretq_f32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t vreinterpretq_f32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t vreinterpretq_f32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t vreinterpretq_f32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t vreinterpretq_f32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t vreinterpretq_f32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t vreinterpretq_f32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t vreinterpretq_f32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t vreinterpretq_f32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t vreinterpretq_f32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t vreinterpretq_f32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t vreinterpretq_f32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t vreinterpretq_s16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t vreinterpretq_s16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t vreinterpretq_s16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t vreinterpretq_s16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t vreinterpretq_s32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t vreinterpretq_s32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t vreinterpretq_s32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t vreinterpretq_s32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t vreinterpretq_s64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t vreinterpretq_s64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t vreinterpretq_s64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t vreinterpretq_s64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t vreinterpretq_s8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t vreinterpretq_s8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t vreinterpretq_s8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t vreinterpretq_s8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t vreinterpretq_u16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t vreinterpretq_u16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t vreinterpretq_u16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t vreinterpretq_u16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t vreinterpretq_u32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t vreinterpretq_u32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t vreinterpretq_u32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t vreinterpretq_u32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t vreinterpretq_u64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t vreinterpretq_u64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t vreinterpretq_u64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t vreinterpretq_u64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t vreinterpretq_u8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t vreinterpretq_u8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t vreinterpretq_u8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t vreinterpretq_u8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t vsetq_lane(float16_t, float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t vsetq_lane(float32_t, float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void vst1q_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void vst1q(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void vst1q_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void vst1q(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void vst2q_f16(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void vst2q(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void vst2q_f32(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void vst2q(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void vst4q_f16(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void vst4q(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void vst4q_f32(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void vst4q(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void vstrhq_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void vstrhq(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void vstrwq_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void vstrwq(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t vsubq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t vsubq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t vsubq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t vsubq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
-float16x8_t vuninitializedq_f16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
-float32x4_t vuninitializedq_f32();
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
-float16x8_t vuninitializedq(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
-float32x4_t vuninitializedq(float32x4_t);
-
-#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* __ARM_MVE_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h b/darwin-x86/lib64/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h
deleted file mode 100644
index 3e069eb..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h
+++ /dev/null
@@ -1,305 +0,0 @@
-//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// A single header library providing a utility class to break up an array of
-// bytes. Whenever run on the same input, it provides the same output, as long
-// as its methods are called in the same order, with the same arguments.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-
-#include <algorithm>
-#include <climits>
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <initializer_list>
-#include <string>
-#include <type_traits>
-#include <utility>
-#include <vector>
-
-// In addition to the comments below, the API is also briefly documented at
-// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
-class FuzzedDataProvider {
- public:
-  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
-  // provide more granular access. |data| must outlive the FuzzedDataProvider.
-  FuzzedDataProvider(const uint8_t *data, size_t size)
-      : data_ptr_(data), remaining_bytes_(size) {}
-  ~FuzzedDataProvider() = default;
-
-  // Returns a std::vector containing |num_bytes| of input data. If fewer than
-  // |num_bytes| of data remain, returns a shorter std::vector containing all
-  // of the data that's left. Can be used with any byte-sized type, such as
-  // char, unsigned char, uint8_t, etc.
-  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    return ConsumeBytes<T>(num_bytes, num_bytes);
-  }
-
-  // Similar to |ConsumeBytes|, but also appends the terminator value at the end
-  // of the resulting vector. Useful when a mutable null-terminated C string is
-  // needed, for example. That is a rare case, though; when possible, prefer the
-  // |ConsumeBytes| or |ConsumeBytesAsString| methods.
-  template <typename T>
-  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes,
-                                            T terminator = 0) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
-    result.back() = terminator;
-    return result;
-  }
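-
-  // For illustration, a worked example of the clamping above (a sketch, not
-  // part of the original API surface): with 3 bytes of input remaining,
-  // ConsumeBytesWithTerminator<char>(5, '\0') clamps the request to 3 and
-  // returns a 4-element vector, i.e. the 3 remaining bytes plus the '\0'
-  // terminator.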
-
-  // Returns a std::string containing |num_bytes| of input data. Using this and
-  // |.c_str()| on the resulting string is the best way to get an immutable
-  // null-terminated C string. If fewer than |num_bytes| of data remain, returns
-  // a shorter std::string containing all of the data that's left.
-  std::string ConsumeBytesAsString(size_t num_bytes) {
-    static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
-                  "ConsumeBytesAsString cannot convert the data to a string.");
-
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::string result(
-        reinterpret_cast<const std::string::value_type *>(data_ptr_),
-        num_bytes);
-    Advance(num_bytes);
-    return result;
-  }
-
-  // Returns a number in the range [min, max] by consuming bytes from the
-  // input data. The value might not be uniformly distributed in the given
-  // range. If there's no input data left, always returns |min|. |min| must
-  // be less than or equal to |max|.
-  template <typename T> T ConsumeIntegralInRange(T min, T max) {
-    static_assert(std::is_integral<T>::value, "An integral type is required.");
-    static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
-
-    if (min > max)
-      abort();
-
-    // Use the biggest type possible to hold the range and the result.
-    uint64_t range = static_cast<uint64_t>(max) - min;
-    uint64_t result = 0;
-    size_t offset = 0;
-
-    while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
-           remaining_bytes_ != 0) {
-      // Pull bytes off the end of the seed data. Experimentally, this seems to
-      // allow the fuzzer to more easily explore the input space. This makes
-      // sense, since it works by modifying inputs that caused new code to run,
-      // and this data is often used to encode the length of data read by
-      // |ConsumeBytes|. Separating out read lengths makes it easier to modify
-      // the contents of the data that is actually read.
-      --remaining_bytes_;
-      result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
-      offset += CHAR_BIT;
-    }
-
-    // Avoid division by 0, in case |range + 1| results in overflow.
-    if (range != std::numeric_limits<decltype(range)>::max())
-      result = result % (range + 1);
-
-    return static_cast<T>(min + result);
-  }
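-
-  // For illustration, one possible trace of the loop above (a sketch, not
-  // part of the original API): with remaining input {0x01, 0x02},
-  // ConsumeIntegralInRange<int>(0, 999) consumes 0x02 (the last byte) and
-  // then 0x01, building result = 0x0201 = 513; since 513 % 1000 == 513, the
-  // call returns 0 + 513 = 513 and leaves no input behind.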
-
-  // Returns a std::string of length from 0 to |max_length|. When it runs out of
-  // input data, returns what remains of the input. Designed to be more stable
-  // with respect to a fuzzer inserting characters than just picking a random
-  // length and then consuming that many bytes with |ConsumeBytes|.
-  std::string ConsumeRandomLengthString(size_t max_length) {
-    // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
-    // followed by anything else to the end of the string. As a result of this
-    // logic, a fuzzer can insert characters into the string, and the string
-    // will be lengthened to include those new characters, resulting in a more
-    // stable fuzzer than picking the length of a string independently from
-    // picking its contents.
-    std::string result;
-
-    // Reserve the anticipated capacity to prevent several reallocations.
-    result.reserve(std::min(max_length, remaining_bytes_));
-    for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
-      char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-      Advance(1);
-      if (next == '\\' && remaining_bytes_ != 0) {
-        next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-        Advance(1);
-        if (next != '\\')
-          break;
-      }
-      result += next;
-    }
-
-    result.shrink_to_fit();
-    return result;
-  }
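-
-  // For illustration, how the escaping above plays out (a sketch, not part of
-  // the original API): the six input bytes 'a','b','\\','\\','c','d' yield
-  // the string "ab\cd", because a doubled backslash maps to a single one,
-  // while 'a','b','\\','x' ends the string early and yields "ab".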
-
-  // Returns a std::vector containing all remaining bytes of the input data.
-  template <typename T> std::vector<T> ConsumeRemainingBytes() {
-    return ConsumeBytes<T>(remaining_bytes_);
-  }
-
-  // Returns a std::string containing all remaining bytes of the input data.
-  // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
-  // object.
-  std::string ConsumeRemainingBytesAsString() {
-    return ConsumeBytesAsString(remaining_bytes_);
-  }
-
-  // Returns a number in the range [Type's min, Type's max]. The value might
-  // not be uniformly distributed in the given range. If there's no input data
-  // left, always returns the type's minimum value.
-  template <typename T> T ConsumeIntegral() {
-    return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
-                                  std::numeric_limits<T>::max());
-  }
-
-  // Reads one byte and returns a bool, or false when no data remains.
-  bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); }
-
-  // Returns a copy of the value selected from the given fixed-size |array|.
-  template <typename T, size_t size>
-  T PickValueInArray(const T (&array)[size]) {
-    static_assert(size > 0, "The array must be non-empty.");
-    return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
-  }
-
-  template <typename T>
-  T PickValueInArray(std::initializer_list<const T> list) {
-    // TODO(Dor1s): switch to static_assert once C++14 is allowed.
-    if (!list.size())
-      abort();
-
-    return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
-  }
-
-  // Returns an enum value. The enum must start at 0 and be contiguous. It must
-  // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
-  // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
-  template <typename T> T ConsumeEnum() {
-    static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
-    return static_cast<T>(ConsumeIntegralInRange<uint32_t>(
-        0, static_cast<uint32_t>(T::kMaxValue)));
-  }
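-
-  // For illustration, a hypothetical enum consumed with the method above:
-  //   enum class Color { kRed, kGreen, kBlue, kMaxValue = kBlue };
-  //   Color c = provider.ConsumeEnum<Color>();  // kRed, kGreen, or kBlue
-  // where |provider| stands for any FuzzedDataProvider instance.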
-
-  // Returns a floating point number in the range [0.0, 1.0]. If there's no
-  // input data left, always returns 0.
-  template <typename T> T ConsumeProbability() {
-    static_assert(std::is_floating_point<T>::value,
-                  "A floating point type is required.");
-
-    // Use different integral types for different floating point types in order
-    // to provide better density of the resulting values.
-    using IntegralType =
-        typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
-                                  uint64_t>::type;
-
-    T result = static_cast<T>(ConsumeIntegral<IntegralType>());
-    result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
-    return result;
-  }
-
-  // Returns a floating point value in the range [Type's lowest, Type's max] by
-  // consuming bytes from the input data. If there's no input data left, always
-  // returns approximately 0.
-  template <typename T> T ConsumeFloatingPoint() {
-    return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
-                                          std::numeric_limits<T>::max());
-  }
-
-  // Returns a floating point value in the given range by consuming bytes from
-  // the input data. If there's no input data left, returns |min|. Note that
-  // |min| must be less than or equal to |max|.
-  template <typename T> T ConsumeFloatingPointInRange(T min, T max) {
-    if (min > max)
-      abort();
-
-    T range = .0;
-    T result = min;
-    constexpr T zero(.0);
-    if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
-      // The diff |max - min| would overflow the given floating point type. Use
-      // half of the diff as the range and consume a bool to decide whether
-      // the result is in the first or the second half of the diff.
-      range = (max / 2.0) - (min / 2.0);
-      if (ConsumeBool()) {
-        result += range;
-      }
-    } else {
-      range = max - min;
-    }
-
-    return result + range * ConsumeProbability<T>();
-  }
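-
-  // For illustration (a sketch, not part of the original API):
-  // ConsumeFloatingPointInRange<double>(-1.0, 1.0) takes the non-overflowing
-  // path, so range = 2.0 and the result is -1.0 + 2.0 * p for some p in
-  // [0.0, 1.0]; the halved-range path only runs when |max - min| itself would
-  // overflow, e.g. for the full [lowest(), max()] span of double.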
-
-  // Reports the remaining bytes available for fuzzed input.
-  size_t remaining_bytes() { return remaining_bytes_; }
-
- private:
-  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
-  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
-
-  void Advance(size_t num_bytes) {
-    if (num_bytes > remaining_bytes_)
-      abort();
-
-    data_ptr_ += num_bytes;
-    remaining_bytes_ -= num_bytes;
-  }
-
-  template <typename T>
-  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes_to_consume) {
-    static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
-
-    // The point of using the size-based constructor below is to increase the
-    // odds of having a vector object with capacity being equal to the length.
-    // That part is always implementation specific, but at least both libc++ and
-    // libstdc++ allocate the requested number of bytes in that constructor,
-    // which seems to be a natural choice for other implementations as well.
-    // To increase the odds even more, we also call |shrink_to_fit| below.
-    std::vector<T> result(size);
-    if (size == 0) {
-      if (num_bytes_to_consume != 0)
-        abort();
-      return result;
-    }
-
-    std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
-    Advance(num_bytes_to_consume);
-
-    // Even though |shrink_to_fit| is also implementation specific, we expect it
-    // to provide an additional assurance in case vector's constructor allocated
-    // a buffer which is larger than the actual amount of data we put inside it.
-    result.shrink_to_fit();
-    return result;
-  }
-
-  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value) {
-    static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
-    static_assert(!std::numeric_limits<TU>::is_signed,
-                  "Source type must be unsigned.");
-
-    // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
-    if (std::numeric_limits<TS>::is_modulo)
-      return static_cast<TS>(value);
-
-    // Avoid using implementation-defined unsigned to signed conversions.
-    // To learn more, see https://stackoverflow.com/questions/13150449.
-    if (value <= std::numeric_limits<TS>::max()) {
-      return static_cast<TS>(value);
-    } else {
-      constexpr auto TS_min = std::numeric_limits<TS>::min();
-      return TS_min + static_cast<TS>(value - TS_min);
-    }
-  }
-
-  const uint8_t *data_ptr_;
-  size_t remaining_bytes_;
-};
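-
-// For illustration, a minimal libFuzzer-style target wired up to this class.
-// |LLVMFuzzerTestOneInput| is the standard libFuzzer entry point;
-// |HandleInput| is a hypothetical consumer of the sliced data:
-//
-//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
-//     FuzzedDataProvider provider(data, size);
-//     uint32_t flags = provider.ConsumeIntegral<uint32_t>();
-//     std::string name = provider.ConsumeRandomLengthString(64);
-//     std::vector<uint8_t> rest = provider.ConsumeRemainingBytes<uint8_t>();
-//     HandleInput(flags, name, rest);  // hypothetical
-//     return 0;
-//   }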
-
-#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
diff --git a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math.h b/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math.h
deleted file mode 100644
index 5d7ce9a..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*===---- __clang_openmp_math.h - OpenMP target math support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if defined(__NVPTX__) && defined(_OPENMP)
-/// TODO:
-/// We are currently reusing the functionality of the Clang-CUDA code path
-/// as an alternative to the host declarations provided by math.h and cmath.
-/// This is suboptimal.
-///
-/// We should instead declare the device functions in a similar way, e.g.,
-/// through OpenMP 5.0 variants, and afterwards populate the module with the
-/// host declarations by unconditionally including the host math.h or cmath,
-/// respectively. This is actually what the Clang-CUDA code path does, using
-/// __device__ instead of variants to avoid redeclarations and get the desired
-/// overload resolution.
-
-#define __CUDA__
-
-#if defined(__cplusplus)
-  #include <__clang_cuda_cmath.h>
-#endif
-
-#undef __CUDA__
-
-/// Magic macro for stopping the math.h/cmath host header from being included.
-#define __CLANG_NO_HOST_MATH__
-
-#endif
-
diff --git a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h b/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
deleted file mode 100644
index a422c98..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*===---- __clang_openmp_math_declares.h - OpenMP math declares ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_OPENMP_MATH_DECLARES_H__
-#define __CLANG_OPENMP_MATH_DECLARES_H__
-
-#ifndef _OPENMP
-#error "This file is for OpenMP compilation only."
-#endif
-
-#if defined(__NVPTX__) && defined(_OPENMP)
-
-#define __CUDA__
-
-#if defined(__cplusplus)
-  #include <__clang_cuda_math_forward_declares.h>
-#endif
-
-/// Include declarations for libdevice functions.
-#include <__clang_cuda_libdevice_declares.h>
-/// Provide definitions for these functions.
-#include <__clang_cuda_device_functions.h>
-
-#undef __CUDA__
-
-#endif
-#endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/cmath b/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/cmath
deleted file mode 100644
index a5183a1..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/cmath
+++ /dev/null
@@ -1,16 +0,0 @@
-/*===-------------- cmath - Alternative cmath header -----------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#include <__clang_openmp_math.h>
-
-#ifndef __CLANG_NO_HOST_MATH__
-#include_next <cmath>
-#else
-#undef __CLANG_NO_HOST_MATH__
-#endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/math.h b/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/math.h
deleted file mode 100644
index d2786ec..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/math.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*===------------- math.h - Alternative math.h header ----------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#include <__clang_openmp_math.h>
-
-#ifndef __CLANG_NO_HOST_MATH__
-#include_next <math.h>
-#else
-#undef __CLANG_NO_HOST_MATH__
-#endif
-
diff --git a/darwin-x86/lib64/clang/11.0.1/include/vecintrin.h b/darwin-x86/lib64/clang/11.0.1/include/vecintrin.h
deleted file mode 100644
index 6f9b609..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/vecintrin.h
+++ /dev/null
@@ -1,10862 +0,0 @@
-/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if defined(__s390x__) && defined(__VEC__)
-
-#define __ATTRS_ai __attribute__((__always_inline__))
-#define __ATTRS_o __attribute__((__overloadable__))
-#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-
-#define __constant(PARM) \
-  __attribute__((__enable_if__ ((PARM) == (PARM), \
-     "argument must be a constant integer")))
-#define __constant_range(PARM, LOW, HIGH) \
-  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
-     "argument must be a constant integer from " #LOW " to " #HIGH)))
-#define __constant_pow2_range(PARM, LOW, HIGH) \
-  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
-                                ((PARM) & ((PARM) - 1)) == 0, \
-     "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
-
-/*-- __lcbb -----------------------------------------------------------------*/
-
-extern __ATTRS_o unsigned int
-__lcbb(const void *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
-  __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
-                           ((Y) == 64 ? 0 : \
-                            (Y) == 128 ? 1 : \
-                            (Y) == 256 ? 2 : \
-                            (Y) == 512 ? 3 : \
-                            (Y) == 1024 ? 4 : \
-                            (Y) == 2048 ? 5 : \
-                            (Y) == 4096 ? 6 : 0) : 0))
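-
-/* For illustration (a sketch, not part of the original header): with a
- * constant length, the macro above folds the power-of-2 boundary into its
- * encoding, so __lcbb(ptr, 512) lowers to __builtin_s390_lcbb(ptr, 3), while
- * a non-constant length falls back to encoding 0 (a 64-byte boundary). */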
-
-/*-- vec_extract ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai signed char
-vec_extract(vector signed char __vec, int __index) {
-  return __vec[__index & 15];
-}
-
-static inline __ATTRS_o_ai unsigned char
-vec_extract(vector bool char __vec, int __index) {
-  return __vec[__index & 15];
-}
-
-static inline __ATTRS_o_ai unsigned char
-vec_extract(vector unsigned char __vec, int __index) {
-  return __vec[__index & 15];
-}
-
-static inline __ATTRS_o_ai signed short
-vec_extract(vector signed short __vec, int __index) {
-  return __vec[__index & 7];
-}
-
-static inline __ATTRS_o_ai unsigned short
-vec_extract(vector bool short __vec, int __index) {
-  return __vec[__index & 7];
-}
-
-static inline __ATTRS_o_ai unsigned short
-vec_extract(vector unsigned short __vec, int __index) {
-  return __vec[__index & 7];
-}
-
-static inline __ATTRS_o_ai signed int
-vec_extract(vector signed int __vec, int __index) {
-  return __vec[__index & 3];
-}
-
-static inline __ATTRS_o_ai unsigned int
-vec_extract(vector bool int __vec, int __index) {
-  return __vec[__index & 3];
-}
-
-static inline __ATTRS_o_ai unsigned int
-vec_extract(vector unsigned int __vec, int __index) {
-  return __vec[__index & 3];
-}
-
-static inline __ATTRS_o_ai signed long long
-vec_extract(vector signed long long __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector bool long long __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector unsigned long long __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai float
-vec_extract(vector float __vec, int __index) {
-  return __vec[__index & 3];
-}
-#endif
-
-static inline __ATTRS_o_ai double
-vec_extract(vector double __vec, int __index) {
-  return __vec[__index & 1];
-}
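-
-/* For illustration (a sketch, not part of the original header): the index is
- * masked with (lanes - 1), so out-of-range indices wrap. For a 16-lane
- * vector signed char, vec_extract(__vec, 17) reads lane 17 & 15 == 1. */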
-
-/*-- vec_insert -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_insert(signed char __scalar, vector signed char __vec, int __index) {
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
-  vector unsigned char __newvec = (vector unsigned char)__vec;
-  __newvec[__index & 15] = (unsigned char)__scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_insert(signed short __scalar, vector signed short __vec, int __index) {
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
-  vector unsigned short __newvec = (vector unsigned short)__vec;
-  __newvec[__index & 7] = (unsigned short)__scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_insert(signed int __scalar, vector signed int __vec, int __index) {
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
-  vector unsigned int __newvec = (vector unsigned int)__vec;
-  __newvec[__index & 3] = __scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_insert(signed long long __scalar, vector signed long long __vec,
-           int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector bool long long __vec,
-           int __index) {
-  vector unsigned long long __newvec = (vector unsigned long long)__vec;
-  __newvec[__index & 1] = __scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
-           int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_insert(float __scalar, vector float __vec, int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_insert(double __scalar, vector double __vec, int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-/*-- vec_promote ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_promote(signed char __scalar, int __index) {
-  const vector signed char __zero = (vector signed char)0;
-  vector signed char __vec = __builtin_shufflevector(__zero, __zero,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_promote(unsigned char __scalar, int __index) {
-  const vector unsigned char __zero = (vector unsigned char)0;
-  vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_promote(signed short __scalar, int __index) {
-  const vector signed short __zero = (vector signed short)0;
-  vector signed short __vec = __builtin_shufflevector(__zero, __zero,
-                                -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_promote(unsigned short __scalar, int __index) {
-  const vector unsigned short __zero = (vector unsigned short)0;
-  vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
-                                  -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_promote(signed int __scalar, int __index) {
-  const vector signed int __zero = (vector signed int)0;
-  vector signed int __vec = __builtin_shufflevector(__zero, __zero,
-                                                    -1, -1, -1, -1);
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_promote(unsigned int __scalar, int __index) {
-  const vector unsigned int __zero = (vector unsigned int)0;
-  vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
-                                                      -1, -1, -1, -1);
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_promote(signed long long __scalar, int __index) {
-  const vector signed long long __zero = (vector signed long long)0;
-  vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
-                                                          -1, -1);
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_promote(unsigned long long __scalar, int __index) {
-  const vector unsigned long long __zero = (vector unsigned long long)0;
-  vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
-                                                            -1, -1);
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_promote(float __scalar, int __index) {
-  const vector float __zero = (vector float)0.0f;
-  vector float __vec = __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_promote(double __scalar, int __index) {
-  const vector double __zero = (vector double)0.0;
-  vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
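-
-/* For illustration (an assumption about the builtin, not stated in this
- * header): the -1 indices passed to __builtin_shufflevector above request
- * undefined elements, so vec_promote only guarantees the one lane it writes;
- * every other lane holds an unspecified value. */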
-
-/*-- vec_insert_and_zero ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_insert_and_zero(const signed char *__ptr) {
-  vector signed char __vec = (vector signed char)0;
-  __vec[7] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert_and_zero(const unsigned char *__ptr) {
-  vector unsigned char __vec = (vector unsigned char)0;
-  __vec[7] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_insert_and_zero(const signed short *__ptr) {
-  vector signed short __vec = (vector signed short)0;
-  __vec[3] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert_and_zero(const unsigned short *__ptr) {
-  vector unsigned short __vec = (vector unsigned short)0;
-  __vec[3] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_insert_and_zero(const signed int *__ptr) {
-  vector signed int __vec = (vector signed int)0;
-  __vec[1] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert_and_zero(const unsigned int *__ptr) {
-  vector unsigned int __vec = (vector unsigned int)0;
-  __vec[1] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_insert_and_zero(const signed long long *__ptr) {
-  vector signed long long __vec = (vector signed long long)0;
-  __vec[0] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert_and_zero(const unsigned long long *__ptr) {
-  vector unsigned long long __vec = (vector unsigned long long)0;
-  __vec[0] = *__ptr;
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_insert_and_zero(const float *__ptr) {
-  vector float __vec = (vector float)0.0f;
-  __vec[1] = *__ptr;
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_insert_and_zero(const double *__ptr) {
-  vector double __vec = (vector double)0.0;
-  __vec[0] = *__ptr;
-  return __vec;
-}
-
-/*-- vec_perm ---------------------------------------------------------------*/
-
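-// Each result byte is picked from the 32-byte concatenation of __a and __b,
-// indexed by the low five bits of the corresponding byte of __c (VPERM).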
-static inline __ATTRS_o_ai vector signed char
-vec_perm(vector signed char __a, vector signed char __b,
-         vector unsigned char __c) {
-  return (vector signed char)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-  return (vector unsigned char)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_perm(vector bool char __a, vector bool char __b,
-         vector unsigned char __c) {
-  return (vector bool char)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_perm(vector signed short __a, vector signed short __b,
-         vector unsigned char __c) {
-  return (vector signed short)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c) {
-  return (vector unsigned short)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_perm(vector bool short __a, vector bool short __b,
-         vector unsigned char __c) {
-  return (vector bool short)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_perm(vector signed int __a, vector signed int __b,
-         vector unsigned char __c) {
-  return (vector signed int)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_perm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned char __c) {
-  return (vector unsigned int)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_perm(vector bool int __a, vector bool int __b,
-         vector unsigned char __c) {
-  return (vector bool int)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c) {
-  return (vector signed long long)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c) {
-  return (vector unsigned long long)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c) {
-  return (vector bool long long)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_perm(vector float __a, vector float __b,
-         vector unsigned char __c) {
-  return (vector float)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_perm(vector double __a, vector double __b,
-         vector unsigned char __c) {
-  return (vector double)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-/*-- vec_permi --------------------------------------------------------------*/
-
-// This prototype is deprecated.
-extern __ATTRS_o vector signed long long
-vec_permi(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector unsigned long long
-vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector bool long long
-vec_permi(vector bool long long __a, vector bool long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_permi(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 3);
-
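-// The macro spreads the 2-bit selector Z (element of X in bit 1, element of
-// Y in bit 0) into the non-contiguous bit positions of the VPDI immediate.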
-#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
-  __builtin_s390_vpdi((vector unsigned long long)(X), \
-                      (vector unsigned long long)(Y), \
-                      (((Z) & 2) << 1) | ((Z) & 1)))
-
-/*-- vec_bperm_u128 ---------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned long long
-vec_bperm_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vbperm(__a, __b);
-}
-#endif
-
-/*-- vec_revb ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_revb(vector signed short __vec) {
-  return (vector signed short)
-         __builtin_s390_vlbrh((vector unsigned short)__vec);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_revb(vector unsigned short __vec) {
-  return __builtin_s390_vlbrh(__vec);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_revb(vector signed int __vec) {
-  return (vector signed int)
-         __builtin_s390_vlbrf((vector unsigned int)__vec);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_revb(vector unsigned int __vec) {
-  return __builtin_s390_vlbrf(__vec);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_revb(vector signed long long __vec) {
-  return (vector signed long long)
-         __builtin_s390_vlbrg((vector unsigned long long)__vec);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_revb(vector unsigned long long __vec) {
-  return __builtin_s390_vlbrg(__vec);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_revb(vector float __vec) {
-  return (vector float)
-         __builtin_s390_vlbrf((vector unsigned int)__vec);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_revb(vector double __vec) {
-  return (vector double)
-         __builtin_s390_vlbrg((vector unsigned long long)__vec);
-}
-
-/*-- vec_reve ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_reve(vector signed char __vec) {
-  return (vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
-                                __vec[11], __vec[10], __vec[9], __vec[8],
-                                __vec[7], __vec[6], __vec[5], __vec[4],
-                                __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __vec) {
-  return (vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
-                                  __vec[11], __vec[10], __vec[9], __vec[8],
-                                  __vec[7], __vec[6], __vec[5], __vec[4],
-                                  __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_reve(vector bool char __vec) {
-  return (vector bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
-                              __vec[11], __vec[10], __vec[9], __vec[8],
-                              __vec[7], __vec[6], __vec[5], __vec[4],
-                              __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __vec) {
-  return (vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
-                                 __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __vec) {
-  return (vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
-                                   __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_reve(vector bool short __vec) {
-  return (vector bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
-                               __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_reve(vector signed int __vec) {
-  return (vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __vec) {
-  return (vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_reve(vector bool int __vec) {
-  return (vector bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __vec) {
-  return (vector signed long long) { __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __vec) {
-  return (vector unsigned long long) { __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __vec) {
-  return (vector bool long long) { __vec[1], __vec[0] };
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_reve(vector float __vec) {
-  return (vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_reve(vector double __vec) {
-  return (vector double) { __vec[1], __vec[0] };
-}
-
-/*-- vec_sel ----------------------------------------------------------------*/
-
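-// Bitwise select: each result bit comes from __b where the corresponding bit
-// of the mask __c is 1, and from __a where it is 0.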
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b,
-        vector unsigned char __c) {
-  return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector unsigned char __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector bool char __c) {
-  return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
-        vector unsigned short __c) {
-  return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
-        vector bool short __c) {
-  return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b,
-        vector unsigned short __c) {
-  return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector unsigned short __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector bool short __c) {
-  return (((vector unsigned short)__c & __b) |
-          (~(vector unsigned short)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b,
-        vector unsigned int __c) {
-  return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
-  return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b,
-        vector unsigned int __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
-        vector unsigned long long __c) {
-  return (((vector signed long long)__c & __b) |
-          (~(vector signed long long)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
-        vector bool long long __c) {
-  return (((vector signed long long)__c & __b) |
-          (~(vector signed long long)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
-        vector unsigned long long __c) {
-  return (((vector bool long long)__c & __b) |
-          (~(vector bool long long)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
-        vector bool long long __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
-        vector unsigned long long __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
-        vector bool long long __c) {
-  return (((vector unsigned long long)__c & __b) |
-          (~(vector unsigned long long)__c & __a));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector unsigned int __c) {
-  return (vector float)((__c & (vector unsigned int)__b) |
-                        (~__c & (vector unsigned int)__a));
-}
-
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector bool int __c) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  vector unsigned int __bc = (vector unsigned int)__b;
-  vector unsigned int __cc = (vector unsigned int)__c;
-  return (vector float)((__cc & __bc) | (~__cc & __ac));
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
-  return (vector double)((__c & (vector unsigned long long)__b) |
-                         (~__c & (vector unsigned long long)__a));
-}
-
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
-  vector unsigned long long __ac = (vector unsigned long long)__a;
-  vector unsigned long long __bc = (vector unsigned long long)__b;
-  vector unsigned long long __cc = (vector unsigned long long)__c;
-  return (vector double)((__cc & __bc) | (~__cc & __ac));
-}
-
-/*-- vec_gather_element -----------------------------------------------------*/
-
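-// __offset holds per-element byte offsets relative to __ptr; only the element
-// selected by the constant __index is replaced, the rest pass through.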
-static inline __ATTRS_o_ai vector signed int
-vec_gather_element(vector signed int __vec, vector unsigned int __offset,
-                   const signed int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const signed int *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_gather_element(vector bool int __vec, vector unsigned int __offset,
-                   const unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const unsigned int *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
-                   const unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const unsigned int *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_gather_element(vector signed long long __vec,
-                   vector unsigned long long __offset,
-                   const signed long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const signed long long *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_gather_element(vector bool long long __vec,
-                   vector unsigned long long __offset,
-                   const unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const unsigned long long *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gather_element(vector unsigned long long __vec,
-                   vector unsigned long long __offset,
-                   const unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const unsigned long long *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_gather_element(vector float __vec, vector unsigned int __offset,
-                   const float *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const float *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_gather_element(vector double __vec, vector unsigned long long __offset,
-                   const double *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const double *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-/*-- vec_scatter_element ----------------------------------------------------*/
-
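-// Stores the single element selected by the constant __index to __ptr plus
-// the matching byte offset taken from __offset.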
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
-                    signed int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
-                    unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
-                    unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed long long __vec,
-                    vector unsigned long long __offset,
-                    signed long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool long long __vec,
-                    vector unsigned long long __offset,
-                    unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned long long __vec,
-                    vector unsigned long long __offset,
-                    unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector float __vec, vector unsigned int __offset,
-                    float *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(float *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-#endif
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector double __vec, vector unsigned long long __offset,
-                    double *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-/*-- vec_xl -----------------------------------------------------------------*/
-
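-// Unaligned vector load from the byte address __ptr + __offset; the offset is
-// measured in bytes, not in elements.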
-static inline __ATTRS_o_ai vector signed char
-vec_xl(long __offset, const signed char *__ptr) {
-  return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_xl(long __offset, const unsigned char *__ptr) {
-  return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_xl(long __offset, const signed short *__ptr) {
-  return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_xl(long __offset, const unsigned short *__ptr) {
-  return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_xl(long __offset, const signed int *__ptr) {
-  return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_xl(long __offset, const unsigned int *__ptr) {
-  return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_xl(long __offset, const signed long long *__ptr) {
-  return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(long __offset, const unsigned long long *__ptr) {
-  return *(const vector unsigned long long *)
-          ((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_xl(long __offset, const float *__ptr) {
-  return *(const vector float *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_xl(long __offset, const double *__ptr) {
-  return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-/*-- vec_xld2 ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_xld2(long __offset, const signed char *__ptr) {
-  return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_xld2(long __offset, const unsigned char *__ptr) {
-  return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_xld2(long __offset, const signed short *__ptr) {
-  return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_xld2(long __offset, const unsigned short *__ptr) {
-  return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_xld2(long __offset, const signed int *__ptr) {
-  return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_xld2(long __offset, const unsigned int *__ptr) {
-  return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_xld2(long __offset, const signed long long *__ptr) {
-  return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xld2(long __offset, const unsigned long long *__ptr) {
-  return *(const vector unsigned long long *)
-          ((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_xld2(long __offset, const double *__ptr) {
-  return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-/*-- vec_xlw4 ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_xlw4(long __offset, const signed char *__ptr) {
-  return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_xlw4(long __offset, const unsigned char *__ptr) {
-  return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_xlw4(long __offset, const signed short *__ptr) {
-  return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_xlw4(long __offset, const unsigned short *__ptr) {
-  return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_xlw4(long __offset, const signed int *__ptr) {
-  return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_xlw4(long __offset, const unsigned int *__ptr) {
-  return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-/*-- vec_xst ----------------------------------------------------------------*/
-
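-// Unaligned vector store to the byte address __ptr + __offset; as with
-// vec_xl, the offset is measured in bytes.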
-static inline __ATTRS_o_ai void
-vec_xst(vector signed char __vec, long __offset, signed char *__ptr) {
-  *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
-  *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed short __vec, long __offset, signed short *__ptr) {
-  *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
-  *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed int __vec, long __offset, signed int *__ptr) {
-  *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
-  *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed long long __vec, long __offset,
-        signed long long *__ptr) {
-  *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned long long __vec, long __offset,
-        unsigned long long *__ptr) {
-  *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
-    __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai void
-vec_xst(vector float __vec, long __offset, float *__ptr) {
-  *(vector float *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai void
-vec_xst(vector double __vec, long __offset, double *__ptr) {
-  *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-/*-- vec_xstd2 --------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
-  *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
-  *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
-  *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
-  *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
-  *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
-  *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed long long __vec, long __offset,
-          signed long long *__ptr) {
-  *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned long long __vec, long __offset,
-          unsigned long long *__ptr) {
-  *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
-    __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector double __vec, long __offset, double *__ptr) {
-  *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-/*-- vec_xstw4 --------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
-  *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
-  *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
-  *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
-  *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
-  *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
-  *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-/*-- vec_load_bndry ---------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_load_bndry(const signed char *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned char
-vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector signed short
-vec_load_bndry(const signed short *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned short
-vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector signed int
-vec_load_bndry(const signed int *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned int
-vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector signed long long
-vec_load_bndry(const signed long long *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned long long
-vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-#if __ARCH__ >= 12
-extern __ATTRS_o vector float
-vec_load_bndry(const float *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-#endif
-
-extern __ATTRS_o vector double
-vec_load_bndry(const double *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
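-// The macro maps the power-of-two boundary length (64..4096) to the 3-bit
-// boundary code expected by the underlying VLBB builtin; any other constant
-// becomes -1 and is rejected as out of range.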
-#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
-  __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
-                            (Y) == 128 ? 1 : \
-                            (Y) == 256 ? 2 : \
-                            (Y) == 512 ? 3 : \
-                            (Y) == 1024 ? 4 : \
-                            (Y) == 2048 ? 5 : \
-                            (Y) == 4096 ? 6 : -1)))
-
-/*-- vec_load_len -----------------------------------------------------------*/
-
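-// Loads min(__len + 1, 16) bytes starting at __ptr (the VLL instruction);
-// bytes beyond the loaded length are set to zero.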
-static inline __ATTRS_o_ai vector signed char
-vec_load_len(const signed char *__ptr, unsigned int __len) {
-  return (vector signed char)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_load_len(const unsigned char *__ptr, unsigned int __len) {
-  return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_load_len(const signed short *__ptr, unsigned int __len) {
-  return (vector signed short)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_load_len(const unsigned short *__ptr, unsigned int __len) {
-  return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_load_len(const signed int *__ptr, unsigned int __len) {
-  return (vector signed int)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_load_len(const unsigned int *__ptr, unsigned int __len) {
-  return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_load_len(const signed long long *__ptr, unsigned int __len) {
-  return (vector signed long long)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
-  return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_load_len(const float *__ptr, unsigned int __len) {
-  return (vector float)__builtin_s390_vll(__len, __ptr);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_load_len(const double *__ptr, unsigned int __len) {
-  return (vector double)__builtin_s390_vll(__len, __ptr);
-}
-
-/*-- vec_load_len_r ---------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned char
-vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
-  return (vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
-}
-#endif
-
-/*-- vec_store_len ----------------------------------------------------------*/
-
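-// Stores min(__len + 1, 16) bytes of __vec to __ptr (the VSTL instruction);
-// every element type is funneled through the same byte-wise builtin.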
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed char __vec, signed char *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed short __vec, signed short *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed int __vec, signed int *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed long long __vec, signed long long *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai void
-vec_store_len(vector float __vec, float *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-#endif
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector double __vec, double *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-/*-- vec_store_len_r --------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai void
-vec_store_len_r(vector unsigned char __vec, unsigned char *__ptr,
-                unsigned int __len) {
-  __builtin_s390_vstrl((vector signed char)__vec, __len, __ptr);
-}
-#endif
-
-/*-- vec_load_pair ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed long long
-vec_load_pair(signed long long __a, signed long long __b) {
-  return (vector signed long long)(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_load_pair(unsigned long long __a, unsigned long long __b) {
-  return (vector unsigned long long)(__a, __b);
-}
-
-/*-- vec_genmask ------------------------------------------------------------*/
-
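-// Expands each bit of the 16-bit __mask, MSB first, into one byte of the
-// result: 0xff where the bit is set, 0x00 otherwise.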
-static inline __ATTRS_o_ai vector unsigned char
-vec_genmask(unsigned short __mask)
-  __constant(__mask) {
-  return (vector unsigned char)(
-    __mask & 0x8000 ? 0xff : 0,
-    __mask & 0x4000 ? 0xff : 0,
-    __mask & 0x2000 ? 0xff : 0,
-    __mask & 0x1000 ? 0xff : 0,
-    __mask & 0x0800 ? 0xff : 0,
-    __mask & 0x0400 ? 0xff : 0,
-    __mask & 0x0200 ? 0xff : 0,
-    __mask & 0x0100 ? 0xff : 0,
-    __mask & 0x0080 ? 0xff : 0,
-    __mask & 0x0040 ? 0xff : 0,
-    __mask & 0x0020 ? 0xff : 0,
-    __mask & 0x0010 ? 0xff : 0,
-    __mask & 0x0008 ? 0xff : 0,
-    __mask & 0x0004 ? 0xff : 0,
-    __mask & 0x0002 ? 0xff : 0,
-    __mask & 0x0001 ? 0xff : 0);
-}
-
-/*-- vec_genmasks_* ---------------------------------------------------------*/
-
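-// Each variant builds an element whose bits __first..__last are set (bit 0
-// being the MSB), wrapping around when __first > __last, and splats it.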
-static inline __ATTRS_o_ai vector unsigned char
-vec_genmasks_8(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 7;
-  unsigned char __bit2 = __last & 7;
-  unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
-  unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
-  unsigned char __value = (__bit1 <= __bit2 ?
-                           __mask1 & ~__mask2 :
-                           __mask1 | ~__mask2);
-  return (vector unsigned char)__value;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_genmasks_16(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 15;
-  unsigned char __bit2 = __last & 15;
-  unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
-  unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
-  unsigned short __value = (__bit1 <= __bit2 ?
-                            __mask1 & ~__mask2 :
-                            __mask1 | ~__mask2);
-  return (vector unsigned short)__value;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_genmasks_32(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 31;
-  unsigned char __bit2 = __last & 31;
-  unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
-  unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
-  unsigned int __value = (__bit1 <= __bit2 ?
-                          __mask1 & ~__mask2 :
-                          __mask1 | ~__mask2);
-  return (vector unsigned int)__value;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_genmasks_64(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 63;
-  unsigned char __bit2 = __last & 63;
-  unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
-  unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
-  unsigned long long __value = (__bit1 <= __bit2 ?
-                                __mask1 & ~__mask2 :
-                                __mask1 | ~__mask2);
-  return (vector unsigned long long)__value;
-}
-
-/*-- vec_splat --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_splat(vector signed char __vec, int __index)
-  __constant_range(__index, 0, 15) {
-  return (vector signed char)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_splat(vector bool char __vec, int __index)
-  __constant_range(__index, 0, 15) {
-  return (vector bool char)(vector unsigned char)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_splat(vector unsigned char __vec, int __index)
-  __constant_range(__index, 0, 15) {
-  return (vector unsigned char)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_splat(vector signed short __vec, int __index)
-  __constant_range(__index, 0, 7) {
-  return (vector signed short)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_splat(vector bool short __vec, int __index)
-  __constant_range(__index, 0, 7) {
-  return (vector bool short)(vector unsigned short)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_splat(vector unsigned short __vec, int __index)
-  __constant_range(__index, 0, 7) {
-  return (vector unsigned short)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_splat(vector signed int __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector signed int)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_splat(vector bool int __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector bool int)(vector unsigned int)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_splat(vector unsigned int __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector unsigned int)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_splat(vector signed long long __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector signed long long)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_splat(vector bool long long __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector bool long long)(vector unsigned long long)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_splat(vector unsigned long long __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector unsigned long long)__vec[__index];
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_splat(vector float __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector float)__vec[__index];
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_splat(vector double __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector double)__vec[__index];
-}
-
-/*-- vec_splat_s* -----------------------------------------------------------*/
-
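-// The 32- and 64-bit variants deliberately take a signed short: the splatted
-// value must fit the 16-bit sign-extended immediate of VREPI.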
-static inline __ATTRS_ai vector signed char
-vec_splat_s8(signed char __scalar)
-  __constant(__scalar) {
-  return (vector signed char)__scalar;
-}
-
-static inline __ATTRS_ai vector signed short
-vec_splat_s16(signed short __scalar)
-  __constant(__scalar) {
-  return (vector signed short)__scalar;
-}
-
-static inline __ATTRS_ai vector signed int
-vec_splat_s32(signed short __scalar)
-  __constant(__scalar) {
-  return (vector signed int)(signed int)__scalar;
-}
-
-static inline __ATTRS_ai vector signed long long
-vec_splat_s64(signed short __scalar)
-  __constant(__scalar) {
-  return (vector signed long long)(signed long long)__scalar;
-}
-
-/*-- vec_splat_u* -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_splat_u8(unsigned char __scalar)
-  __constant(__scalar) {
-  return (vector unsigned char)__scalar;
-}
-
-static inline __ATTRS_ai vector unsigned short
-vec_splat_u16(unsigned short __scalar)
-  __constant(__scalar) {
-  return (vector unsigned short)__scalar;
-}
-
-static inline __ATTRS_ai vector unsigned int
-vec_splat_u32(signed short __scalar)
-  __constant(__scalar) {
-  return (vector unsigned int)(signed int)__scalar;
-}
-
-static inline __ATTRS_ai vector unsigned long long
-vec_splat_u64(signed short __scalar)
-  __constant(__scalar) {
-  return (vector unsigned long long)(signed long long)__scalar;
-}
-
-/*-- vec_splats -------------------------------------------------------------*/
-
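-// Unlike the vec_splat_* forms above, vec_splats accepts a non-constant
-// scalar and replicates it across all elements.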
-static inline __ATTRS_o_ai vector signed char
-vec_splats(signed char __scalar) {
-  return (vector signed char)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_splats(unsigned char __scalar) {
-  return (vector unsigned char)__scalar;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_splats(signed short __scalar) {
-  return (vector signed short)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_splats(unsigned short __scalar) {
-  return (vector unsigned short)__scalar;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_splats(signed int __scalar) {
-  return (vector signed int)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_splats(unsigned int __scalar) {
-  return (vector unsigned int)__scalar;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_splats(signed long long __scalar) {
-  return (vector signed long long)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_splats(unsigned long long __scalar) {
-  return (vector unsigned long long)__scalar;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_splats(float __scalar) {
-  return (vector float)__scalar;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_splats(double __scalar) {
-  return (vector double)__scalar;
-}
-
-/*-- vec_extend_s64 ---------------------------------------------------------*/
-
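-// Sign-extends the rightmost element of each doubleword of __a into a
-// vector signed long long.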
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed char __a) {
-  return (vector signed long long)(__a[7], __a[15]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed short __a) {
-  return (vector signed long long)(__a[3], __a[7]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed int __a) {
-  return (vector signed long long)(__a[1], __a[3]);
-}
-
-/*-- vec_mergeh -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mergeh(vector signed char __a, vector signed char __b) {
-  return (vector signed char)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_mergeh(vector bool char __a, vector bool char __b) {
-  return (vector bool char)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mergeh(vector signed short __a, vector signed short __b) {
-  return (vector signed short)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_mergeh(vector bool short __a, vector bool short __b) {
-  return (vector bool short)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mergeh(vector signed int __a, vector signed int __b) {
-  return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_mergeh(vector bool int __a, vector bool int __b) {
-  return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)(__a[0], __b[0]);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)(__a[0], __b[0]);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)(__a[0], __b[0]);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergeh(vector float __a, vector float __b) {
-  return (vector float)(__a[0], __b[0], __a[1], __b[1]);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_mergeh(vector double __a, vector double __b) {
-  return (vector double)(__a[0], __b[0]);
-}
-
-/*-- vec_mergel -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mergel(vector signed char __a, vector signed char __b) {
-  return (vector signed char)(
-    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
-    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_mergel(vector bool char __a, vector bool char __b) {
-  return (vector bool char)(
-    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
-    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)(
-    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
-    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mergel(vector signed short __a, vector signed short __b) {
-  return (vector signed short)(
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_mergel(vector bool short __a, vector bool short __b) {
-  return (vector bool short)(
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)(
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mergel(vector signed int __a, vector signed int __b) {
-  return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_mergel(vector bool int __a, vector bool int __b) {
-  return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mergel(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)(__a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_mergel(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)(__a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)(__a[1], __b[1]);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergel(vector float __a, vector float __b) {
-  return (vector float)(__a[2], __b[2], __a[3], __b[3]);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_mergel(vector double __a, vector double __b) {
-  return (vector double)(__a[1], __b[1]);
-}
-
-/*-- vec_pack ---------------------------------------------------------------*/
-
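-// Truncating pack: keeps the low half of each wide element (the odd-indexed
-// narrow subelements), with no saturation; vec_packs is the saturating form.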
-static inline __ATTRS_o_ai vector signed char
-vec_pack(vector signed short __a, vector signed short __b) {
-  vector signed char __ac = (vector signed char)__a;
-  vector signed char __bc = (vector signed char)__b;
-  return (vector signed char)(
-    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
-    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_pack(vector bool short __a, vector bool short __b) {
-  vector bool char __ac = (vector bool char)__a;
-  vector bool char __bc = (vector bool char)__b;
-  return (vector bool char)(
-    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
-    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned char __ac = (vector unsigned char)__a;
-  vector unsigned char __bc = (vector unsigned char)__b;
-  return (vector unsigned char)(
-    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
-    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_pack(vector signed int __a, vector signed int __b) {
-  vector signed short __ac = (vector signed short)__a;
-  vector signed short __bc = (vector signed short)__b;
-  return (vector signed short)(
-    __ac[1], __ac[3], __ac[5], __ac[7],
-    __bc[1], __bc[3], __bc[5], __bc[7]);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_pack(vector bool int __a, vector bool int __b) {
-  vector bool short __ac = (vector bool short)__a;
-  vector bool short __bc = (vector bool short)__b;
-  return (vector bool short)(
-    __ac[1], __ac[3], __ac[5], __ac[7],
-    __bc[1], __bc[3], __bc[5], __bc[7]);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned short __ac = (vector unsigned short)__a;
-  vector unsigned short __bc = (vector unsigned short)__b;
-  return (vector unsigned short)(
-    __ac[1], __ac[3], __ac[5], __ac[7],
-    __bc[1], __bc[3], __bc[5], __bc[7]);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_pack(vector signed long long __a, vector signed long long __b) {
-  vector signed int __ac = (vector signed int)__a;
-  vector signed int __bc = (vector signed int)__b;
-  return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_pack(vector bool long long __a, vector bool long long __b) {
-  vector bool int __ac = (vector bool int)__a;
-  vector bool int __bc = (vector bool int)__b;
-  return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  vector unsigned int __bc = (vector unsigned int)__b;
-  return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
-}
-
-/*-- vec_packs --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_packs(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vpksh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vpklsh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_packs(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vpksf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vpklsf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_packs(vector signed long long __a, vector signed long long __b) {
-  return __builtin_s390_vpksg(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vpklsg(__a, __b);
-}
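-
-// Note: unlike the truncating vec_pack above, the vec_packs variants
-// saturate; e.g. for signed shorts an element of 300 packs to 127
-// (SCHAR_MAX) rather than wrapping to 0x2C.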
-
-/*-- vec_packs_cc -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
-  return __builtin_s390_vpkshs(__a, __b, __cc);
-}
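-
-// Note: the int *__cc out-parameter of the _cc variants receives the
-// condition code set by the saturating pack instruction, indicating
-// whether any element saturated (see the z/Architecture Principles of
-// Operation for the exact encoding); a hedged usage sketch:
-//   int cc;
-//   vector signed char r = vec_packs_cc(a, b, &cc);
-//   if (cc != 0) { /* some element may have saturated */ }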
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vpklshs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return __builtin_s390_vpksfs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vpklsfs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_packs_cc(vector signed long long __a, vector signed long long __b,
-             int *__cc) {
-  return __builtin_s390_vpksgs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
-             int *__cc) {
-  return __builtin_s390_vpklsgs(__a, __b, __cc);
-}
-
-/*-- vec_packsu -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector signed short __a, vector signed short __b) {
-  const vector signed short __zero = (vector signed short)0;
-  return __builtin_s390_vpklsh(
-    (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
-    (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
-}
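-
-// Note: for signed inputs, (__a >= __zero) produces an all-ones mask in
-// each non-negative lane and all zeros otherwise, so the AND clamps
-// negative elements to zero before the unsigned saturating pack, which is
-// vec_packsu's defined behavior for signed arguments.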
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vpklsh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector signed int __a, vector signed int __b) {
-  const vector signed int __zero = (vector signed int)0;
-  return __builtin_s390_vpklsf(
-    (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
-    (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vpklsf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector signed long long __a, vector signed long long __b) {
-  const vector signed long long __zero = (vector signed long long)0;
-  return __builtin_s390_vpklsg(
-    (vector unsigned long long)(__a >= __zero) &
-    (vector unsigned long long)__a,
-    (vector unsigned long long)(__b >= __zero) &
-    (vector unsigned long long)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vpklsg(__a, __b);
-}
-
-/*-- vec_packsu_cc ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vpklshs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vpklsfs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
-              int *__cc) {
-  return __builtin_s390_vpklsgs(__a, __b, __cc);
-}
-
-/*-- vec_unpackh ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_unpackh(vector signed char __a) {
-  return __builtin_s390_vuphb(__a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_unpackh(vector bool char __a) {
-  return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackh(vector unsigned char __a) {
-  return __builtin_s390_vuplhb(__a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_unpackh(vector signed short __a) {
-  return __builtin_s390_vuphh(__a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_unpackh(vector bool short __a) {
-  return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackh(vector unsigned short __a) {
-  return __builtin_s390_vuplhh(__a);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackh(vector signed int __a) {
-  return __builtin_s390_vuphf(__a);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackh(vector bool int __a) {
-  return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackh(vector unsigned int __a) {
-  return __builtin_s390_vuplhf(__a);
-}
-
-/*-- vec_unpackl ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_unpackl(vector signed char __a) {
-  return __builtin_s390_vuplb(__a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_unpackl(vector bool char __a) {
-  return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackl(vector unsigned char __a) {
-  return __builtin_s390_vupllb(__a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_unpackl(vector signed short __a) {
-  return __builtin_s390_vuplhw(__a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_unpackl(vector bool short __a) {
-  return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackl(vector unsigned short __a) {
-  return __builtin_s390_vupllh(__a);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackl(vector signed int __a) {
-  return __builtin_s390_vuplf(__a);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackl(vector bool int __a) {
-  return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackl(vector unsigned int __a) {
-  return __builtin_s390_vupllf(__a);
-}
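-
-// Note: vec_unpackh widens the high (leftmost) half of the source vector
-// and vec_unpackl the low half; the signed builtins (vuph*, vupl*, with
-// vuplhw as the irregularly named signed low-halfword case) sign-extend,
-// while the "logical" builtins (vuplh*, vupll*) zero-extend.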
-
-/*-- vec_cmpeq --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)(__a == __b);
-}
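-
-// Note: the vec_cmp* family relies on the compiler's elementwise vector
-// comparison operators, which already yield lanes of all ones (true) or
-// all zeros (false); the casts merely relabel the result as the matching
-// vector bool type.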
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector bool int __a, vector bool int __b) {
-  return (vector bool int)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector float __a, vector float __b) {
-  return (vector bool int)(__a == __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector double __a, vector double __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-/*-- vec_cmpge --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a >= __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector float __a, vector float __b) {
-  return (vector bool int)(__a >= __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector double __a, vector double __b) {
-  return (vector bool long long)(__a >= __b);
-}
-
-/*-- vec_cmpgt --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a > __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector float __a, vector float __b) {
-  return (vector bool int)(__a > __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector double __a, vector double __b) {
-  return (vector bool long long)(__a > __b);
-}
-
-/*-- vec_cmple --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a <= __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector float __a, vector float __b) {
-  return (vector bool int)(__a <= __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector double __a, vector double __b) {
-  return (vector bool long long)(__a <= __b);
-}
-
-/*-- vec_cmplt --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a < __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector float __a, vector float __b) {
-  return (vector bool int)(__a < __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector double __a, vector double __b) {
-  return (vector bool long long)(__a < __b);
-}
-
-/*-- vec_all_eq -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc == 0;
-}
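-
-// Note: the *s compare builtins also set the condition code: 0 when the
-// comparison holds in all elements, 3 when it holds in none, and 1 for a
-// mix. Hence vec_all_eq tests __cc == 0, while vec_all_ne below tests
-// __cc == 3 against the very same compare.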
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_eq(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_ne -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_ne(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_ge -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc == 3;
-}
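-
-// Note: with no integer >= compare instruction, vec_all_ge swaps the
-// operands of the > compare: "__b > __a in no element" (condition code 3)
-// is equivalent to "__a >= __b in every element". The float and double
-// overloads below use the dedicated >= compares and test code 0 directly.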
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc == 3;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_ge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_gt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc == 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_gt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_le -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc == 3;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_le(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_lt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc == 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_lt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
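-// Editorial note (not part of the original header): the vec_all_* and
-// vec_any_* predicates in this file share one condition-code idiom.
-// The "s"-suffixed compare builtins (vceq*s, vch*s, vchl*s, vfc*s) set
-// __cc to 0 when the comparison holds for every element, 1 when it
-// holds for some but not all, and 3 when it holds for none (2 is
-// unused).  So `__cc == 0` encodes "all", `__cc <= 1` encodes "any",
-// `__cc == 3` encodes "none", and `__cc != 0` encodes "not all".
-// Because the hardware only compares for (unsigned) greater-than,
-// vec_all_lt(__a, __b) above swaps the operands and asks whether
-// __b > __a holds everywhere.
-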
-/*-- vec_all_nge ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_ngt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_ngt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_ngt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_nle ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nle(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nle(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_nlt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nlt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nlt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
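-// Editorial note (not part of the original header): vec_all_nge,
-// vec_all_ngt, vec_all_nle and vec_all_nlt above are not synonyms for
-// vec_all_lt and friends.  They reuse the same VFCH/VFCHE compares but
-// test `__cc == 3` ("no element satisfies the positive predicate"), so
-// an unordered element (a NaN in either operand) counts toward the
-// negated predicate, whereas it would make the plain vec_all_lt
-// predicate false.
-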
-/*-- vec_all_nan ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nan(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nan(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_numeric --------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_numeric(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_numeric(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc == 3;
-}
-
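-// Editorial note (not part of the original header): vec_all_nan and
-// vec_all_numeric above (and their vec_any_* counterparts below) are
-// built on the test-data-class builtins (vftcisb/vftcidb).  The class
-// mask 15 selects the four NaN classes (quiet and signaling NaN of
-// either sign), so `__cc == 0` means every element is a NaN and
-// `__cc == 3` means none is.
-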
-/*-- vec_any_eq -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_eq(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
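-// Editorial note (not part of the original header): there is no
-// compare-not-equal instruction, so vec_any_ne below runs the same
-// VCEQ compares as vec_any_eq and inverts the sense: `__cc == 0` would
-// mean all elements compare equal, hence `__cc != 0` means at least
-// one element differs.
-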
-/*-- vec_any_ne -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_ne(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_ge -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc != 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_ge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
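-// Editorial note (not part of the original header): vec_any_ge above
-// is likewise an inverted predicate: any(__a >= __b) is the negation
-// of all(__b > __a), so the strict compare runs with swapped operands
-// and the result is `__cc != 0` ("not all __b > __a") instead of
-// `__cc <= 1`.
-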
-/*-- vec_any_gt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_gt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_le -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc != 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_le(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_lt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc <= 1;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_lt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_nge ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_ngt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_ngt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_ngt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_nle ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nle(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nle(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_nlt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nlt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nlt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_nan ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nan(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc != 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nan(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc != 3;
-}
-
-/*-- vec_any_numeric --------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_numeric(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_numeric(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc != 0;
-}
-
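-// Editorial sketch (not part of the original header): a typical use of
-// the predicates above is to guard a vector fast path with a scalar
-// fallback.  handle_elementwise() is a hypothetical helper; the rest
-// assumes only intrinsics defined in this file:
-//
-//   static void clamp_or_bail(vector double *__x, vector double __lim) {
-//     if (vec_any_nan(*__x))
-//       handle_elementwise(__x);      /* hypothetical scalar slow path */
-//     else
-//       *__x = vec_min(*__x, __lim);  /* all lanes numeric: stay vector */
-//   }
-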
-/*-- vec_andc ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_andc(vector bool char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector bool char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector bool char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_andc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector signed short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector bool short __a, vector signed short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector bool short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_andc(vector bool int __a, vector bool int __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector signed int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector bool int __a, vector signed int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector bool int __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector bool int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_andc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector bool long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_andc(vector float __a, vector float __b) {
-  return (vector float)((vector unsigned int)__a &
-                         ~(vector unsigned int)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector double __b) {
-  return (vector double)((vector unsigned long long)__a &
-                         ~(vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector bool long long __a, vector double __b) {
-  return (vector double)((vector unsigned long long)__a &
-                         ~(vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a &
-                         ~(vector unsigned long long)__b);
-}
-
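-// Editorial note (not part of the original header): C has no bitwise
-// operators for floating-point vectors, so the float/double overloads
-// of vec_andc above (and of vec_nor below) bitcast to same-width
-// unsigned lanes, combine, and cast back, carrying the bit pattern
-// through unchanged.  A classic application is clearing the sign bit
-// (an editorial sketch, not an intrinsic from this header):
-//
-//   static vector double vfabs(vector double __x) {
-//     vector unsigned long long __sign = vec_splats(1ULL << 63);
-//     return vec_andc(__x, (vector double)__sign);
-//   }
-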
-/*-- vec_nor ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_nor(vector bool char __a, vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector bool char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector bool char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_nor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector signed short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector bool short __a, vector signed short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector bool short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_nor(vector bool int __a, vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector signed int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector bool int __a, vector signed int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector bool int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_nor(vector bool long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector bool long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nor(vector float __a, vector float __b) {
-  return (vector float)~((vector unsigned int)__a |
-                         (vector unsigned int)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a |
-                          (vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector bool long long __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a |
-                          (vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector bool long long __b) {
-  return (vector double)~((vector unsigned long long)__a |
-                          (vector unsigned long long)__b);
-}
-
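-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// vec_nor yields the lane-wise bitwise NOR, ~(__a | __b).
-static inline vector unsigned int
-__sketch_nor_demo(void) {
-  vector unsigned int __a = {0x0F0F0F0Fu, 0u, ~0u, 1u};
-  vector unsigned int __b = {0xF0F0F0F0u, 0u, 0u, 1u};
-  // Lane results: 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFE.
-  return vec_nor(__a, __b);
-}
-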
-/*-- vec_orc ----------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_orc(vector bool char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_orc(vector signed char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_orc(vector bool short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_orc(vector signed short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_orc(vector bool int __a, vector bool int __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_orc(vector signed int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_orc(vector bool long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_orc(vector signed long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector float
-vec_orc(vector float __a, vector float __b) {
-  return (vector float)((vector unsigned int)__a |
-                        ~(vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_orc(vector double __a, vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         ~(vector unsigned long long)__b);
-}
-#endif
-
-/*-- vec_nand ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_nand(vector bool char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_nand(vector signed char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_nand(vector bool short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_nand(vector signed short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_nand(vector bool int __a, vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_nand(vector signed int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_nand(vector bool long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_nand(vector signed long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_nand(vector float __a, vector float __b) {
-  return (vector float)~((vector unsigned int)__a &
-                         (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_nand(vector double __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a &
-                          (vector unsigned long long)__b);
-}
-#endif
-
-/*-- vec_eqv ----------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_eqv(vector bool char __a, vector bool char __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_eqv(vector signed char __a, vector signed char __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_eqv(vector bool short __a, vector bool short __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_eqv(vector signed short __a, vector signed short __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_eqv(vector bool int __a, vector bool int __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_eqv(vector signed int __a, vector signed int __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_eqv(vector float __a, vector float __b) {
-  return (vector float)~((vector unsigned int)__a ^
-                         (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_eqv(vector double __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a ^
-                          (vector unsigned long long)__b);
-}
-#endif
-
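-#if __ARCH__ >= 12
-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// vec_eqv is the complement of XOR, so a result bit is 1 exactly where
-// __a and __b agree; vec_nand is likewise ~(__a & __b).
-static inline vector unsigned int
-__sketch_same_bits(vector unsigned int __a, vector unsigned int __b) {
-  // eqv(x, x) is all ones; eqv(x, ~x) is all zeros.
-  return vec_eqv(__a, __b);
-}
-#endif
-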
-/*-- vec_cntlz --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector signed char __a) {
-  return __builtin_s390_vclzb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector unsigned char __a) {
-  return __builtin_s390_vclzb(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector signed short __a) {
-  return __builtin_s390_vclzh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector unsigned short __a) {
-  return __builtin_s390_vclzh(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector signed int __a) {
-  return __builtin_s390_vclzf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector unsigned int __a) {
-  return __builtin_s390_vclzf(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector signed long long __a) {
-  return __builtin_s390_vclzg((vector unsigned long long)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector unsigned long long __a) {
-  return __builtin_s390_vclzg(__a);
-}
-
-/*-- vec_cnttz --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector signed char __a) {
-  return __builtin_s390_vctzb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector unsigned char __a) {
-  return __builtin_s390_vctzb(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector signed short __a) {
-  return __builtin_s390_vctzh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector unsigned short __a) {
-  return __builtin_s390_vctzh(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector signed int __a) {
-  return __builtin_s390_vctzf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector unsigned int __a) {
-  return __builtin_s390_vctzf(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector signed long long __a) {
-  return __builtin_s390_vctzg((vector unsigned long long)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector unsigned long long __a) {
-  return __builtin_s390_vctzg(__a);
-}
-
-/*-- vec_popcnt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector signed char __a) {
-  return __builtin_s390_vpopctb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector unsigned char __a) {
-  return __builtin_s390_vpopctb(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector signed short __a) {
-  return __builtin_s390_vpopcth((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector unsigned short __a) {
-  return __builtin_s390_vpopcth(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector signed int __a) {
-  return __builtin_s390_vpopctf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector unsigned int __a) {
-  return __builtin_s390_vpopctf(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector signed long long __a) {
-  return __builtin_s390_vpopctg((vector unsigned long long)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector unsigned long long __a) {
-  return __builtin_s390_vpopctg(__a);
-}
-
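-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// the bit-count families all work per element; for a lane holding
-// 0x00000001, vec_cntlz reports 31, vec_cnttz 0, and vec_popcnt 1.
-static inline vector unsigned int
-__sketch_bit_counts(vector unsigned int __x) {
-  // Leading zeros plus population count, computed lane by lane.
-  return vec_cntlz(__x) + vec_popcnt(__x);
-}
-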
-/*-- vec_rl -----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_rl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_verllvb(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_verllvb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_rl(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_verllvh(
-    (vector unsigned short)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_verllvh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_rl(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_verllvf(
-    (vector unsigned int)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_verllvf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_verllvg(
-    (vector unsigned long long)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_verllvg(__a, __b);
-}
-
-/*-- vec_rli ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_rli(vector signed char __a, unsigned long __b) {
-  return (vector signed char)__builtin_s390_verllb(
-    (vector unsigned char)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_rli(vector unsigned char __a, unsigned long __b) {
-  return __builtin_s390_verllb(__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_rli(vector signed short __a, unsigned long __b) {
-  return (vector signed short)__builtin_s390_verllh(
-    (vector unsigned short)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_rli(vector unsigned short __a, unsigned long __b) {
-  return __builtin_s390_verllh(__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_rli(vector signed int __a, unsigned long __b) {
-  return (vector signed int)__builtin_s390_verllf(
-    (vector unsigned int)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_rli(vector unsigned int __a, unsigned long __b) {
-  return __builtin_s390_verllf(__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_rli(vector signed long long __a, unsigned long __b) {
-  return (vector signed long long)__builtin_s390_verllg(
-    (vector unsigned long long)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rli(vector unsigned long long __a, unsigned long __b) {
-  return __builtin_s390_verllg(__a, (int)__b);
-}
-
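-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// vec_rl rotates each element left by the count in the matching lane of
-// __b, while vec_rli applies one scalar count to every lane; rotating
-// 0x80000001 left by one bit gives 0x00000003.
-static inline vector unsigned int
-__sketch_rotl1(vector unsigned int __x) {
-  return vec_rli(__x, 1);
-}
-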
-/*-- vec_rl_mask ------------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_rl_mask(vector signed char __a, vector unsigned char __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned char
-vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector signed short
-vec_rl_mask(vector signed short __a, vector unsigned short __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned short
-vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector signed int
-vec_rl_mask(vector signed int __a, vector unsigned int __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned int
-vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector signed long long
-vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned long long
-vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
-            unsigned char __c) __constant(__c);
-
-#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
-  __extension__ ({ \
-    vector unsigned char __res; \
-    vector unsigned char __x = (vector unsigned char)(X); \
-    vector unsigned char __y = (vector unsigned char)(Y); \
-    switch (sizeof ((X)[0])) { \
-    case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
-             (vector unsigned char)__x, (vector unsigned char)__x, \
-             (vector unsigned char)__y, (Z)); break; \
-    case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
-             (vector unsigned short)__x, (vector unsigned short)__x, \
-             (vector unsigned short)__y, (Z)); break; \
-    case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
-             (vector unsigned int)__x, (vector unsigned int)__x, \
-             (vector unsigned int)__y, (Z)); break; \
-    default: __res = (vector unsigned char) __builtin_s390_verimg( \
-             (vector unsigned long long)__x, (vector unsigned long long)__x, \
-             (vector unsigned long long)__y, (Z)); break; \
-    } __res; }))
-
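-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// the macro above dispatches on sizeof((X)[0]), so a single spelling of
-// vec_rl_mask selects verimb/verimh/verimf/verimg by element width; the
-// mask operand must be a compile-time constant.
-static inline vector unsigned int
-__sketch_rl_mask(vector unsigned int __x, vector unsigned int __amt) {
-  // Element size 4 selects the word form, __builtin_s390_verimf.
-  return vec_rl_mask(__x, __amt, 0x0F);
-}
-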
-/*-- vec_sll ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsl(__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
-  return __builtin_s390_vsl(__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_s390_vsl(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned char __b) {
-  return (vector signed short)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned int __b) {
-  return (vector signed short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned char __b) {
-  return (vector signed int)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned short __b) {
-  return (vector signed int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned short __b) {
-  return (vector signed long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned int __b) {
-  return (vector signed long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned char __b) {
-  return (vector bool long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned short __b) {
-  return (vector bool long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned int __b) {
-  return (vector bool long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned short __b) {
-  return (vector unsigned long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned int __b) {
-  return (vector unsigned long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-/*-- vec_slb ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vslb(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector signed char __b) {
-  return __builtin_s390_vslb(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vslb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector signed short __b) {
-  return (vector unsigned short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector signed int __b) {
-  return (vector unsigned int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector signed long long __b) {
-  return (vector unsigned long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector signed int __b) {
-  return (vector float)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector unsigned int __b) {
-  return (vector float)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector signed long long __b) {
-  return (vector double)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector unsigned long long __b) {
-  return (vector double)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
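-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// vec_sll shifts the whole 128-bit vector left by the low three bits of
-// the count, and vec_slb by whole bytes, so a full variable shift is
-// usually composed from both.
-static inline vector unsigned char
-__sketch_shift_bits(vector unsigned char __a, vector unsigned char __cnt) {
-  // __cnt is assumed to hold the same bit count replicated in every byte:
-  // shift by whole bytes first, then by the residual 0..7 bits.
-  return vec_sll(vec_slb(__a, __cnt), __cnt);
-}
-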
-/*-- vec_sld ----------------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_sld(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool char
-vec_sld(vector bool char __a, vector bool char __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned char
-vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector signed short
-vec_sld(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool short
-vec_sld(vector bool short __a, vector bool short __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned short
-vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector signed int
-vec_sld(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool int
-vec_sld(vector bool int __a, vector bool int __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned int
-vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector signed long long
-vec_sld(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool long long
-vec_sld(vector bool long long __a, vector bool long long __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned long long
-vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 15);
-
-#if __ARCH__ >= 12
-extern __ATTRS_o vector float
-vec_sld(vector float __a, vector float __b, int __c)
-  __constant_range(__c, 0, 15);
-#endif
-
-extern __ATTRS_o vector double
-vec_sld(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 15);
-
-#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
-  __builtin_s390_vsldb((vector unsigned char)(X), \
-                       (vector unsigned char)(Y), (Z)))
-
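-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// vec_sld concatenates __a:__b into a 32-byte value and returns the 16
-// bytes starting at offset __c; vec_sldw below is the same operation with
-// the offset given in 4-byte words.
-static inline vector unsigned char
-__sketch_sld3(vector unsigned char __hi, vector unsigned char __lo) {
-  // With __c == 3, result byte i is byte (i + 3) of the concatenation.
-  return vec_sld(__hi, __lo, 3);
-}
-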
-/*-- vec_sldw ---------------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_sldw(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned char
-vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector signed short
-vec_sldw(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned short
-vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector signed int
-vec_sldw(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned int
-vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector signed long long
-vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned long long
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_sldw(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 3);
-
-#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
-  __builtin_s390_vsldb((vector unsigned char)(X), \
-                       (vector unsigned char)(Y), (Z) * 4))
-
-/*-- vec_sldb ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-extern __ATTRS_o vector signed char
-vec_sldb(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned char
-vec_sldb(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed short
-vec_sldb(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned short
-vec_sldb(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed int
-vec_sldb(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned int
-vec_sldb(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed long long
-vec_sldb(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned long long
-vec_sldb(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector float
-vec_sldb(vector float __a, vector float __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector double
-vec_sldb(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 7);
-
-#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
-  __builtin_s390_vsld((vector unsigned char)(X), \
-                      (vector unsigned char)(Y), (Z)))
-
-#endif
-
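-#if __ARCH__ >= 13
-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// vec_sldb is the bit-granular counterpart of vec_sld: it shifts the
-// 256-bit concatenation __a:__b left by __c bits (0..7) and returns the
-// high 128 bits.
-static inline vector unsigned int
-__sketch_sldb5(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sldb(__a, __b, 5);
-}
-#endif
-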
-/*-- vec_sral ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsra(__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned short __b) {
-  return __builtin_s390_vsra(__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_s390_vsra(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned char __b) {
-  return (vector signed short)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned int __b) {
-  return (vector signed short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned char __b) {
-  return (vector signed int)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned short __b) {
-  return (vector signed int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned short __b) {
-  return (vector signed long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned int __b) {
-  return (vector signed long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned char __b) {
-  return (vector bool long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned short __b) {
-  return (vector bool long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned int __b) {
-  return (vector bool long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned short __b) {
-  return (vector unsigned long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned int __b) {
-  return (vector unsigned long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-/*-- vec_srab ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsrab(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector signed char __b) {
-  return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsrab(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector signed short __b) {
-  return (vector unsigned short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector signed int __b) {
-  return (vector unsigned int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector signed long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector signed int __b) {
-  return (vector float)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector unsigned int __b) {
-  return (vector float)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector signed long long __b) {
-  return (vector double)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector unsigned long long __b) {
-  return (vector double)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
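-// Usage sketch (editorial illustration; the helper name is hypothetical):
-// the arithmetic shifts replicate the sign bit; as with vec_sll/vec_slb,
-// vec_srab moves whole bytes and vec_sral the residual 0..7 bits, so the
-// two compose into a full variable shift.
-static inline vector signed int
-__sketch_ashift(vector signed int __a, vector unsigned char __cnt) {
-  // __cnt is assumed to hold the same bit count replicated in every byte.
-  return vec_sral(vec_srab(__a, (vector unsigned int)__cnt), __cnt);
-}
-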
-/*-- vec_srl ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsrl(__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
-  return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned char __b) {
-  return (vector signed short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned int __b) {
-  return (vector signed short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned char __b) {
-  return (vector signed int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned short __b) {
-  return (vector signed int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned short __b) {
-  return (vector signed long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned int __b) {
-  return (vector signed long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned char __b) {
-  return (vector bool long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned short __b) {
-  return (vector bool long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned int __b) {
-  return (vector bool long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned short __b) {
-  return (vector unsigned long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned int __b) {
-  return (vector unsigned long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-/*-- vec_srb ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector signed char __b) {
-  return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsrlb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector signed short __b) {
-  return (vector unsigned short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector signed int __b) {
-  return (vector unsigned int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector signed long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector signed int __b) {
-  return (vector float)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector unsigned int __b) {
-  return (vector float)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector signed long long __b) {
-  return (vector double)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector unsigned long long __b) {
-  return (vector double)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
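-// Note (gloss, not part of the original header): vec_srl shifts the whole
-// 128-bit vector right by a small bit count, while vec_srb above shifts by
-// whole bytes; composing the two yields arbitrary 128-bit right shifts.
-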
-/*-- vec_srdb ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-extern __ATTRS_o vector signed char
-vec_srdb(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned char
-vec_srdb(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed short
-vec_srdb(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned short
-vec_srdb(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed int
-vec_srdb(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned int
-vec_srdb(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed long long
-vec_srdb(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned long long
-vec_srdb(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector float
-vec_srdb(vector float __a, vector float __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector double
-vec_srdb(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 7);
-
-#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
-  __builtin_s390_vsrd((vector unsigned char)(X), \
-                      (vector unsigned char)(Y), (Z)))
-
-#endif
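-
-// Note (gloss, not part of the original header): the extern prototypes
-// above exist only to drive overload resolution; writing (vec_srdb) in
-// parentheses inside the macro suppresses macro expansion, so __typeof__
-// recovers the right return type while every variant funnels into the
-// single byte-level builtin __builtin_s390_vsrd.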
-
-/*-- vec_abs ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_abs(vector signed char __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_abs(vector signed short __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_abs(vector signed int __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_abs(vector signed long long __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_abs(vector float __a) {
-  return __builtin_s390_vflpsb(__a);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_abs(vector double __a) {
-  return __builtin_s390_vflpdb(__a);
-}
-
-/*-- vec_nabs ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nabs(vector float __a) {
-  return __builtin_s390_vflnsb(__a);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_nabs(vector double __a) {
-  return __builtin_s390_vflndb(__a);
-}
-
-/*-- vec_max ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector signed char __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector bool char __b) {
-  vector signed char __bc = (vector signed char)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector bool char __a, vector signed char __b) {
-  vector signed char __ac = (vector signed char)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector bool char __b) {
-  vector unsigned char __bc = (vector unsigned char)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector bool char __a, vector unsigned char __b) {
-  vector unsigned char __ac = (vector unsigned char)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector signed short __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector bool short __b) {
-  vector signed short __bc = (vector signed short)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector bool short __a, vector signed short __b) {
-  vector signed short __ac = (vector signed short)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector bool short __b) {
-  vector unsigned short __bc = (vector unsigned short)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector bool short __a, vector unsigned short __b) {
-  vector unsigned short __ac = (vector unsigned short)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector signed int __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector bool int __b) {
-  vector signed int __bc = (vector signed int)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector bool int __a, vector signed int __b) {
-  vector signed int __ac = (vector signed int)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector bool int __b) {
-  vector unsigned int __bc = (vector unsigned int)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector bool int __a, vector unsigned int __b) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector signed long long __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector bool long long __b) {
-  vector signed long long __bc = (vector signed long long)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector bool long long __a, vector signed long long __b) {
-  vector signed long long __ac = (vector signed long long)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector bool long long __b) {
-  vector unsigned long long __bc = (vector unsigned long long)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector bool long long __a, vector unsigned long long __b) {
-  vector unsigned long long __ac = (vector unsigned long long)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_max(vector float __a, vector float __b) {
-  return __builtin_s390_vfmaxsb(__a, __b, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_max(vector double __a, vector double __b) {
-#if __ARCH__ >= 12
-  return __builtin_s390_vfmaxdb(__a, __b, 0);
-#else
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-#endif
-}
-
-/*-- vec_min ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector signed char __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector bool char __b) {
-  vector signed char __bc = (vector signed char)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector bool char __a, vector signed char __b) {
-  vector signed char __ac = (vector signed char)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector bool char __b) {
-  vector unsigned char __bc = (vector unsigned char)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector bool char __a, vector unsigned char __b) {
-  vector unsigned char __ac = (vector unsigned char)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector signed short __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector bool short __b) {
-  vector signed short __bc = (vector signed short)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector bool short __a, vector signed short __b) {
-  vector signed short __ac = (vector signed short)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector bool short __b) {
-  vector unsigned short __bc = (vector unsigned short)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector bool short __a, vector unsigned short __b) {
-  vector unsigned short __ac = (vector unsigned short)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector signed int __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector bool int __b) {
-  vector signed int __bc = (vector signed int)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector bool int __a, vector signed int __b) {
-  vector signed int __ac = (vector signed int)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector bool int __b) {
-  vector unsigned int __bc = (vector unsigned int)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector bool int __a, vector unsigned int __b) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector signed long long __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector bool long long __b) {
-  vector signed long long __bc = (vector signed long long)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector bool long long __a, vector signed long long __b) {
-  vector signed long long __ac = (vector signed long long)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector bool long long __b) {
-  vector unsigned long long __bc = (vector unsigned long long)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector bool long long __a, vector unsigned long long __b) {
-  vector unsigned long long __ac = (vector unsigned long long)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_min(vector float __a, vector float __b) {
-  return __builtin_s390_vfminsb(__a, __b, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_min(vector double __a, vector double __b) {
-#if __ARCH__ >= 12
-  return __builtin_s390_vfmindb(__a, __b, 0);
-#else
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-#endif
-}
-
-/*-- vec_add_u128 -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vaq(__a, __b);
-}
-
-/*-- vec_addc ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_addc(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vaccb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_addc(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vacch(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vaccf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vaccg(__a, __b);
-}
-
-/*-- vec_addc_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vaccq(__a, __b);
-}
-
-/*-- vec_adde_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return __builtin_s390_vacq(__a, __b, __c);
-}
-
-/*-- vec_addec_u128 ---------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return __builtin_s390_vacccq(__a, __b, __c);
-}
-
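-// Illustrative sketch (not part of the original header): the *_u128
-// primitives above compose into wider arithmetic, e.g. adding 256-bit
-// values held as {hi,lo} pairs of vector unsigned char:
-//
-//   vector unsigned char lo    = vec_add_u128(a_lo, b_lo);
-//   vector unsigned char carry = vec_addc_u128(a_lo, b_lo);  /* 0 or 1 */
-//   vector unsigned char hi    = vec_adde_u128(a_hi, b_hi, carry);
-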
-/*-- vec_avg ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_avg(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vavgb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_avg(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vavgh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_avg(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vavgf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_avg(vector signed long long __a, vector signed long long __b) {
-  return __builtin_s390_vavgg(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vavglb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vavglh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vavglf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vavglg(__a, __b);
-}
-
-/*-- vec_checksum -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned int
-vec_checksum(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vcksm(__a, __b);
-}
-
-/*-- vec_gfmsum -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vgfmb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vgfmh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vgfmf(__a, __b);
-}
-
-/*-- vec_gfmsum_128 ---------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vgfmg(__a, __b);
-}
-
-/*-- vec_gfmsum_accum -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
-                 vector unsigned short __c) {
-  return __builtin_s390_vgfmab(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
-                 vector unsigned int __c) {
-  return __builtin_s390_vgfmah(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
-                 vector unsigned long long __c) {
-  return __builtin_s390_vgfmaf(__a, __b, __c);
-}
-
-/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_accum_128(vector unsigned long long __a,
-                     vector unsigned long long __b,
-                     vector unsigned char __c) {
-  return __builtin_s390_vgfmag(__a, __b, __c);
-}
-
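-// Note (gloss, not part of the original header): the vec_gfmsum family
-// performs carry-less (Galois-field) multiply-and-sum, the building block
-// of CRC and GHASH-style polynomial hashing.
-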
-/*-- vec_mladd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector signed char __b,
-          vector signed char __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector unsigned char __a, vector signed char __b,
-          vector signed char __c) {
-  return (vector signed char)__a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return __a * (vector signed char)__b + (vector signed char)__c;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mladd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector unsigned short __a, vector signed short __b,
-          vector signed short __c) {
-  return (vector signed short)__a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * (vector signed short)__b + (vector signed short)__c;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector unsigned int __a, vector signed int __b,
-          vector signed int __c) {
-  return (vector signed int)__a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return __a * (vector signed int)__b + (vector signed int)__c;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mladd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return __a * __b + __c;
-}
-
-/*-- vec_mhadd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mhadd(vector signed char __a, vector signed char __b,
-          vector signed char __c) {
-  return __builtin_s390_vmahb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mhadd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return __builtin_s390_vmalhb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mhadd(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __builtin_s390_vmahh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mhadd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __builtin_s390_vmalhh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mhadd(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return __builtin_s390_vmahf(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mhadd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return __builtin_s390_vmalhf(__a, __b, __c);
-}
-
-/*-- vec_meadd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_meadd(vector signed char __a, vector signed char __b,
-          vector signed short __c) {
-  return __builtin_s390_vmaeb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_meadd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned short __c) {
-  return __builtin_s390_vmaleb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_meadd(vector signed short __a, vector signed short __b,
-          vector signed int __c) {
-  return __builtin_s390_vmaeh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_meadd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_s390_vmaleh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_meadd(vector signed int __a, vector signed int __b,
-          vector signed long long __c) {
-  return __builtin_s390_vmaef(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_meadd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned long long __c) {
-  return __builtin_s390_vmalef(__a, __b, __c);
-}
-
-/*-- vec_moadd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_moadd(vector signed char __a, vector signed char __b,
-          vector signed short __c) {
-  return __builtin_s390_vmaob(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_moadd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned short __c) {
-  return __builtin_s390_vmalob(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_moadd(vector signed short __a, vector signed short __b,
-          vector signed int __c) {
-  return __builtin_s390_vmaoh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_moadd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_s390_vmaloh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_moadd(vector signed int __a, vector signed int __b,
-          vector signed long long __c) {
-  return __builtin_s390_vmaof(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_moadd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned long long __c) {
-  return __builtin_s390_vmalof(__a, __b, __c);
-}
-
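-// Note (gloss, not part of the original header): vec_meadd/vec_moadd
-// multiply the even-/odd-indexed elements of their operands and add a
-// double-width accumulator, the widening counterparts of vec_mule and
-// vec_mulo below.
-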
-/*-- vec_mulh ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mulh(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vmhb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mulh(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vmlhb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mulh(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vmhh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vmlhh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mulh(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vmhf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulh(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vmlhf(__a, __b);
-}
-
-/*-- vec_mule ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_mule(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vmeb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vmleb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mule(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vmeh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vmleh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mule(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vmef(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vmlef(__a, __b);
-}
-
-/*-- vec_mulo ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_mulo(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vmob(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vmlob(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mulo(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vmoh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vmloh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mulo(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vmof(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vmlof(__a, __b);
-}
-
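-// Note (gloss, not part of the original header): vec_mule/vec_mulo
-// multiply the even-/odd-indexed elements of their operands, producing
-// results of twice the element width.
-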
-/*-- vec_msum_u128 ----------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-#define vec_msum_u128(X, Y, Z, W) \
-  ((vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)))
-#endif
-
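-// Note (gloss, not part of the original header): vec_msum_u128 is a macro
-// because the final operand is encoded as an instruction immediate, so W
-// must be a compile-time constant.
-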
-/*-- vec_sub_u128 -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsq(__a, __b);
-}
-
-/*-- vec_subc ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_subc(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vscbib(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_subc(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vscbih(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vscbif(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vscbig(__a, __b);
-}
-
-/*-- vec_subc_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vscbiq(__a, __b);
-}
-
-/*-- vec_sube_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return __builtin_s390_vsbiq(__a, __b, __c);
-}
-
-/*-- vec_subec_u128 ---------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return __builtin_s390_vsbcbiq(__a, __b, __c);
-}
-
-/*-- vec_sum2 ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vsumgh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vsumgf(__a, __b);
-}
-
-/*-- vec_sum_u128 -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vsumqf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vsumqg(__a, __b);
-}
-
-/*-- vec_sum4 ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsumb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vsumh(__a, __b);
-}
-
-/*-- vec_test_mask ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed char __a, vector unsigned char __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vtm(__a, __b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed short __a, vector unsigned short __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed int __a, vector unsigned int __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_test_mask(vector float __a, vector unsigned int __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector double __a, vector unsigned long long __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
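-// Note (gloss, not part of the original header): vec_test_mask returns a
-// condition-code-style value: 0 when none of the bits selected by the mask
-// are set, 3 when all of them are, and an intermediate value for a mix.
-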
-/*-- vec_madd ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_madd(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfmasb(__a, __b, __c);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_madd(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfmadb(__a, __b, __c);
-}
-
-/*-- vec_msub ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_msub(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfmssb(__a, __b, __c);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_msub(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfmsdb(__a, __b, __c);
-}
-
-/*-- vec_nmadd --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmadd(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfnmasb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_nmadd(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfnmadb(__a, __b, __c);
-}
-#endif
-
-/*-- vec_nmsub --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmsub(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfnmssb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_nmsub(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfnmsdb(__a, __b, __c);
-}
-#endif
-
-/*-- vec_sqrt ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sqrt(vector float __a) {
-  return __builtin_s390_vfsqsb(__a);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_sqrt(vector double __a) {
-  return __builtin_s390_vfsqdb(__a);
-}
-
-/*-- vec_ld2f ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_ai vector double
-vec_ld2f(const float *__ptr) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
-}
-
-/*-- vec_st2f ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_ai void
-vec_st2f(vector double __a, float *__ptr) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
-}
-
-/*-- vec_ctd ----------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector signed long long __a, int __b)
-  __constant_range(__b, 0, 31) {
-  vector double __conv = __builtin_convertvector(__a, vector double);
-  __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
-  return __conv;
-}
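-
-// (Gloss, not part of the original header: (0x3ffULL - __b) << 52 places
-// biased exponent 1023 - __b in an IEEE-754 double's exponent field, i.e.
-// it is the bit pattern of 2**-__b, so the multiply applies the requested
-// scaling. vec_ctsl/vec_ctul below use 1023 + __b, i.e. 2**__b.)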
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector unsigned long long __a, int __b)
-  __constant_range(__b, 0, 31) {
-  vector double __conv = __builtin_convertvector(__a, vector double);
-  __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
-  return __conv;
-}
-
-/*-- vec_ctsl ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_ctsl(vector double __a, int __b)
-  __constant_range(__b, 0, 31) {
-  __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-/*-- vec_ctul ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_ctul(vector double __a, int __b)
-  __constant_range(__b, 0, 31) {
-  __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-/*-- vec_doublee ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector double
-vec_doublee(vector float __a) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  __v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2);
-  return __builtin_convertvector(__pack, vector double);
-}
-#endif
-
-/*-- vec_floate -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector float
-vec_floate(vector double __a) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  __v2f32 __pack = __builtin_convertvector(__a, __v2f32);
-  return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1);
-}
-#endif
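-
-// Note (gloss, not part of the original header): an index of -1 in
-// __builtin_shufflevector selects a "don't care" lane, which is how
-// vec_floate leaves the odd float lanes undefined.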
-
-/*-- vec_double -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector double
-vec_double(vector signed long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_double(vector unsigned long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-/*-- vec_float --------------------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-static inline __ATTRS_o_ai vector float
-vec_float(vector signed int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_float(vector unsigned int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-#endif
-
-/*-- vec_signed -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed long long
-vec_signed(vector double __a) {
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector signed int
-vec_signed(vector float __a) {
-  return __builtin_convertvector(__a, vector signed int);
-}
-#endif
-
-/*-- vec_unsigned -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unsigned(vector double __a) {
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned int
-vec_unsigned(vector float __a) {
-  return __builtin_convertvector(__a, vector unsigned int);
-}
-#endif
-
-/*-- vec_roundp -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundp(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 6);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundp(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 6);
-}
-
-/*-- vec_ceil ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_ceil(vector float __a) {
-  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 4, 6);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_ceil(vector double __a) {
-  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 4, 6);
-}
-
-/*-- vec_roundm -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundm(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 7);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundm(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 7);
-}
-
-/*-- vec_floor --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_floor(vector float __a) {
-  // On this platform, vec_floor never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 4, 7);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_floor(vector double __a) {
-  // On this platform, vec_floor never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 4, 7);
-}
-
-/*-- vec_roundz -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundz(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 5);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundz(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 5);
-}
-
-/*-- vec_trunc --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_trunc(vector float __a) {
-  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 4, 5);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_trunc(vector double __a) {
-  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 4, 5);
-}
-
-/*-- vec_roundc -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundc(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundc(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 0);
-}
-
-/*-- vec_rint ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_rint(vector float __a) {
-  // vec_rint may trigger the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 0, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_rint(vector double __a) {
-  // vec_rint may trigger the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 0, 0);
-}
-
-/*-- vec_round --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_round(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 4);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_round(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 4);
-}
-
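-// Note (gloss, not part of the original header): in the vfisb/vfidb calls
-// above, a second argument of 4 suppresses the IEEE-inexact exception and
-// the third selects the rounding mode; the pattern across this file reads
-// 0 = current mode, 4 = to nearest, 5 = toward zero, 6 = toward +infinity,
-// 7 = toward -infinity.
-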
-/*-- vec_fp_test_data_class -------------------------------------------------*/
-
-#if __ARCH__ >= 12
-extern __ATTRS_o vector bool int
-vec_fp_test_data_class(vector float __a, int __b, int *__c)
-  __constant_range(__b, 0, 4095);
-
-extern __ATTRS_o vector bool long long
-vec_fp_test_data_class(vector double __a, int __b, int *__c)
-  __constant_range(__b, 0, 4095);
-
-#define vec_fp_test_data_class(X, Y, Z) \
-  ((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \
-   __extension__ ({ \
-     vector unsigned char __res; \
-     vector unsigned char __x = (vector unsigned char)(X); \
-     int *__z = (Z); \
-     switch (sizeof ((X)[0])) { \
-     case 4:  __res = (vector unsigned char) \
-                      __builtin_s390_vftcisb((vector float)__x, (Y), __z); \
-              break; \
-     default: __res = (vector unsigned char) \
-                      __builtin_s390_vftcidb((vector double)__x, (Y), __z); \
-              break; \
-     } __res; }))
-#else
-#define vec_fp_test_data_class(X, Y, Z) \
-  ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
-#endif
-
-#define __VEC_CLASS_FP_ZERO_P (1 << 11)
-#define __VEC_CLASS_FP_ZERO_N (1 << 10)
-#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | __VEC_CLASS_FP_ZERO_N)
-#define __VEC_CLASS_FP_NORMAL_P (1 << 9)
-#define __VEC_CLASS_FP_NORMAL_N (1 << 8)
-#define __VEC_CLASS_FP_NORMAL (__VEC_CLASS_FP_NORMAL_P | \
-                               __VEC_CLASS_FP_NORMAL_N)
-#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 7)
-#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 6)
-#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
-                                  __VEC_CLASS_FP_SUBNORMAL_N)
-#define __VEC_CLASS_FP_INFINITY_P (1 << 5)
-#define __VEC_CLASS_FP_INFINITY_N (1 << 4)
-#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \
-                                 __VEC_CLASS_FP_INFINITY_N)
-#define __VEC_CLASS_FP_QNAN_P (1 << 3)
-#define __VEC_CLASS_FP_QNAN_N (1 << 2)
-#define __VEC_CLASS_FP_QNAN (__VEC_CLASS_FP_QNAN_P | __VEC_CLASS_FP_QNAN_N)
-#define __VEC_CLASS_FP_SNAN_P (1 << 1)
-#define __VEC_CLASS_FP_SNAN_N (1 << 0)
-#define __VEC_CLASS_FP_SNAN (__VEC_CLASS_FP_SNAN_P | __VEC_CLASS_FP_SNAN_N)
-#define __VEC_CLASS_FP_NAN (__VEC_CLASS_FP_QNAN | __VEC_CLASS_FP_SNAN)
-#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \
-                                   __VEC_CLASS_FP_SUBNORMAL | \
-                                   __VEC_CLASS_FP_ZERO | \
-                                   __VEC_CLASS_FP_INFINITY)
-
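-// Illustrative sketch (not part of the original header): clearing NaN
-// lanes of a vector double, with hypothetical variables __x and
-// __fallback:
-//
-//   int __cc;
-//   vector bool long long __nan =
-//       vec_fp_test_data_class(__x, __VEC_CLASS_FP_NAN, &__cc);
-//   vector double __clean = vec_sel(__x, __fallback, __nan);
-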
-/*-- vec_cp_until_zero ------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero(vector signed char __a) {
-  return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero(vector bool char __a) {
-  return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero(vector unsigned char __a) {
-  return __builtin_s390_vistrb(__a);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero(vector signed short __a) {
-  return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero(vector bool short __a) {
-  return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero(vector unsigned short __a) {
-  return __builtin_s390_vistrh(__a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero(vector signed int __a) {
-  return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero(vector bool int __a) {
-  return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero(vector unsigned int __a) {
-  return __builtin_s390_vistrf(__a);
-}
-
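-/* Sketch (illustrative, not part of the header; `buf` is a hypothetical
- * 16-byte source, target built with -mzvector): vec_cp_until_zero maps to
- * VISTR, which keeps elements up to and including the first zero element
- * and clears everything after it.
- *
- *   vector unsigned char chunk = vec_xl(0, (const unsigned char *)buf);
- *   vector unsigned char head  = vec_cp_until_zero(chunk);
- *
- * If no element of the input is zero, the input comes back unchanged; the
- * _cc variants below additionally report through the condition code
- * whether a zero element was present.
- */
-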
-/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
-  return __builtin_s390_vistrbs(__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
-  return __builtin_s390_vistrhs(__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
-  return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
-                                                 __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
-  return __builtin_s390_vistrfs(__a, __cc);
-}
-
-/*-- vec_cmpeq_idx ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfeeb((vector unsigned char)__a,
-                         (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfeeb((vector unsigned char)__a,
-                              (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfeeb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfeeh((vector unsigned short)__a,
-                         (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfeeh((vector unsigned short)__a,
-                              (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfeeh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfeef((vector unsigned int)__a,
-                         (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfeef((vector unsigned int)__a,
-                              (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfeef(__a, __b);
-}
-
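-/* Sketch (illustrative, not part of the header; `chunk` is a hypothetical
- * loaded vector): the _idx forms map to VFEE and deliver the byte index of
- * the first position where the operands compare equal in byte element 7 of
- * the result, or 16 if there is none. Comparing against an all-zero vector
- * therefore finds the first NUL in a chunk:
- *
- *   vector unsigned char pos =
- *       vec_cmpeq_idx(chunk, (vector unsigned char){0});
- *   int first_nul = pos[7];   // 16 when chunk contains no zero byte
- */
-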
-/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfeebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfeebs((vector unsigned char)__a,
-                               (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                 int *__cc) {
-  return __builtin_s390_vfeebs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfeehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfeehs((vector unsigned short)__a,
-                               (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                 int *__cc) {
-  return __builtin_s390_vfeehs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfeefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfeefs((vector unsigned int)__a,
-                               (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vfeefs(__a, __b, __cc);
-}
-
-/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfeezb((vector unsigned char)__a,
-                          (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfeezb((vector unsigned char)__a,
-                               (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfeezb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfeezh((vector unsigned short)__a,
-                          (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfeezh((vector unsigned short)__a,
-                               (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfeezh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfeezf((vector unsigned int)__a,
-                          (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfeezf((vector unsigned int)__a,
-                               (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfeezf(__a, __b);
-}
-
-/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                      int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfeezbs((vector unsigned char)__a,
-                           (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfeezbs((vector unsigned char)__a,
-                                (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                      int *__cc) {
-  return __builtin_s390_vfeezbs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                      int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfeezhs((vector unsigned short)__a,
-                           (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfeezhs((vector unsigned short)__a,
-                                (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                      int *__cc) {
-  return __builtin_s390_vfeezhs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfeezfs((vector unsigned int)__a,
-                           (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfeezfs((vector unsigned int)__a,
-                                (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                      int *__cc) {
-  return __builtin_s390_vfeezfs(__a, __b, __cc);
-}
-
-/*-- vec_cmpne_idx ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfeneb((vector unsigned char)__a,
-                          (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfeneb((vector unsigned char)__a,
-                               (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfeneb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfeneh((vector unsigned short)__a,
-                          (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfeneh((vector unsigned short)__a,
-                               (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfeneh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfenef((vector unsigned int)__a,
-                          (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfenef((vector unsigned int)__a,
-                               (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfenef(__a, __b);
-}
-
-/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfenebs((vector unsigned char)__a,
-                           (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfenebs((vector unsigned char)__a,
-                                (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                 int *__cc) {
-  return __builtin_s390_vfenebs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfenehs((vector unsigned short)__a,
-                           (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfenehs((vector unsigned short)__a,
-                                (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                 int *__cc) {
-  return __builtin_s390_vfenehs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfenefs((vector unsigned int)__a,
-                           (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfenefs((vector unsigned int)__a,
-                                (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vfenefs(__a, __b, __cc);
-}
-
-/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfenezb((vector unsigned char)__a,
-                           (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfenezb((vector unsigned char)__a,
-                                (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfenezb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfenezh((vector unsigned short)__a,
-                           (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfenezh((vector unsigned short)__a,
-                                (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfenezh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfenezf((vector unsigned int)__a,
-                           (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfenezf((vector unsigned int)__a,
-                                (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfenezf(__a, __b);
-}
-
-/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                      int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfenezbs((vector unsigned char)__a,
-                            (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfenezbs((vector unsigned char)__a,
-                                 (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                      int *__cc) {
-  return __builtin_s390_vfenezbs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                      int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfenezhs((vector unsigned short)__a,
-                            (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfenezhs((vector unsigned short)__a,
-                                 (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                      int *__cc) {
-  return __builtin_s390_vfenezhs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfenezfs((vector unsigned int)__a,
-                            (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfenezfs((vector unsigned int)__a,
-                                 (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                      int *__cc) {
-  return __builtin_s390_vfenezfs(__a, __b, __cc);
-}
-
-/*-- vec_cmprg --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
-}
-
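-/* Note (editorial, derived from the immediates used above and below): the
- * vec_cmprg/vec_cmpnrg family wraps VSTRC. Each element of __a is tested
- * against ranges formed by even/odd element pairs of __b under the
- * per-element comparison controls in __c. In the final immediate, 4
- * requests a boolean-mask result rather than an index, 8 inverts the range
- * test (hence 12 for the masked *nrg* forms), and the vstrcz* builtins
- * used by the _or_0_idx forms additionally stop at the first zero element
- * of __a.
- */
-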
-/*-- vec_cmprg_cc -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
-             vector unsigned char __c, int *__cc) {
-  return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned short __c, int *__cc) {
-  return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
-             vector unsigned int __c, int *__cc) {
-  return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
-}
-
-/*-- vec_cmprg_idx ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return __builtin_s390_vstrcb(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c) {
-  return __builtin_s390_vstrch(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
-              vector unsigned int __c) {
-  return __builtin_s390_vstrcf(__a, __b, __c, 0);
-}
-
-/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                 vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                 vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                 vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
-}
-
-/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
-                   vector unsigned char __c) {
-  return __builtin_s390_vstrczb(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
-                   vector unsigned short __c) {
-  return __builtin_s390_vstrczh(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
-                   vector unsigned int __c) {
-  return __builtin_s390_vstrczf(__a, __b, __c, 0);
-}
-
-/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                      vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                      vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                      vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
-}
-
-/*-- vec_cmpnrg -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
-           vector unsigned char __c) {
-  return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
-           vector unsigned short __c) {
-  return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
-           vector unsigned int __c) {
-  return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
-}
-
-/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c, int *__cc) {
-  return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c, int *__cc) {
-  return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
-              vector unsigned int __c, int *__cc) {
-  return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
-}
-
-/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return __builtin_s390_vstrcb(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
-               vector unsigned short __c) {
-  return __builtin_s390_vstrch(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
-               vector unsigned int __c) {
-  return __builtin_s390_vstrcf(__a, __b, __c, 8);
-}
-
-/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                  vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                  vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                  vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
-}
-
-/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
-                    vector unsigned char __c) {
-  return __builtin_s390_vstrczb(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
-                    vector unsigned short __c) {
-  return __builtin_s390_vstrczh(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
-                    vector unsigned int __c) {
-  return __builtin_s390_vstrczf(__a, __b, __c, 8);
-}
-
-/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                       vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                       vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                       vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
-}
-
-/*-- vec_find_any_eq --------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector signed short __a, vector signed short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector signed int __a, vector signed int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector bool int __a, vector bool int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
-}
-
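-/* Sketch (illustrative, not part of the header; `chunk` is a hypothetical
- * loaded vector): vec_find_any_eq maps to VFAE and flags every element of
- * __a that equals *any* element of __b, so __b acts as a small character
- * set; duplicate entries in __b are harmless padding.
- *
- *   const vector unsigned char vowels =
- *       { 'a','e','i','o','u','a','a','a','a','a','a','a','a','a','a','a' };
- *   vector bool char hits = vec_find_any_eq(chunk, vowels);
- *
- * The immediates follow the same scheme as elsewhere in this file: 4 =
- * mask, 0 = index, 12/8 = the negated (ne) forms, and the vfaez* builtins
- * stop at a zero element.
- */
-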
-/*-- vec_find_any_eq_cc -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
-                   int *__cc) {
-  return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
-                   int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
-                   int *__cc) {
-  return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
-                   int *__cc) {
-  return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
-}
-
-/*-- vec_find_any_eq_idx ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaeb((vector unsigned char)__a,
-                              (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaeb(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaeh((vector unsigned short)__a,
-                              (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaeh(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaef((vector unsigned int)__a,
-                              (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaef(__a, __b, 0);
-}
-
-/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
-                       int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfaebs((vector unsigned char)__a,
-                               (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                       int *__cc) {
-  return __builtin_s390_vfaebs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
-                       int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs((vector unsigned short)__a,
-                               (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
-                       int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfaefs((vector unsigned int)__a,
-                               (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                       int *__cc) {
-  return __builtin_s390_vfaefs(__a, __b, 0, __cc);
-}
-
-/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaezb((vector unsigned char)__a,
-                          (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaezb((vector unsigned char)__a,
-                               (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaezb(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaezh((vector unsigned short)__a,
-                          (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaezh((vector unsigned short)__a,
-                               (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaezh(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaezf((vector unsigned int)__a,
-                          (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaezf((vector unsigned int)__a,
-                               (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaezf(__a, __b, 0);
-}
-
-/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                            int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaezbs((vector unsigned char)__a,
-                           (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs((vector unsigned char)__a,
-                                (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                            int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaezhs((vector unsigned short)__a,
-                           (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezhs((vector unsigned short)__a,
-                                (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
-                            vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
-                            int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaezfs((vector unsigned int)__a,
-                           (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs((vector unsigned int)__a,
-                                (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
-}
-
-/*-- vec_find_any_ne --------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector signed char __a, vector signed char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector bool char __a, vector bool char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector signed short __a, vector signed short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector bool short __a, vector bool short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector signed int __a, vector signed int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector bool int __a, vector bool int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
-}
-
-/*-- vec_find_any_ne_cc -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
-                   int *__cc) {
-  return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
-                   int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
-                   int *__cc) {
-  return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
-                   int *__cc) {
-  return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
-}
-
-/*-- vec_find_any_ne_idx ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaeb((vector unsigned char)__a,
-                              (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaeb(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaeh((vector unsigned short)__a,
-                              (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaeh(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaef((vector unsigned int)__a,
-                              (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaef(__a, __b, 8);
-}
-
-/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
-                       int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfaebs((vector unsigned char)__a,
-                               (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                       int *__cc) {
-  return __builtin_s390_vfaebs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
-                       int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs((vector unsigned short)__a,
-                               (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
-                       int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfaefs((vector unsigned int)__a,
-                               (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                       int *__cc) {
-  return __builtin_s390_vfaefs(__a, __b, 8, __cc);
-}
-
-/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaezb((vector unsigned char)__a,
-                          (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaezb((vector unsigned char)__a,
-                               (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaezb(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaezh((vector unsigned short)__a,
-                          (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaezh((vector unsigned short)__a,
-                               (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaezh(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaezf((vector unsigned int)__a,
-                          (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaezf((vector unsigned int)__a,
-                               (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaezf(__a, __b, 8);
-}
-
-/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                            int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaezbs((vector unsigned char)__a,
-                           (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs((vector unsigned char)__a,
-                                (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                            int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaezhs((vector unsigned short)__a,
-                           (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezhs((vector unsigned short)__a,
-                                (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
-                            vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
-                            int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaezfs((vector unsigned int)__a,
-                           (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs((vector unsigned int)__a,
-                                (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
-}
-
-/*-- vec_search_string_cc ---------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed char __a, vector signed char __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsb((vector unsigned char)__a,
-                               (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool char __a, vector bool char __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsb((vector unsigned char)__a,
-                               (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned char __a, vector unsigned char __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsb(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed short __a, vector signed short __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsh((vector unsigned short)__a,
-                               (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool short __a, vector bool short __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsh((vector unsigned short)__a,
-                               (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned short __a, vector unsigned short __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsh(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed int __a, vector signed int __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsf((vector unsigned int)__a,
-                               (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool int __a, vector bool int __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsf((vector unsigned int)__a,
-                               (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsf(__a, __b, __c, __cc);
-}
-
-#endif
-
-/*-- vec_search_string_until_zero_cc ----------------------------------------*/
-
-#if __ARCH__ >= 13
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed char __a,
-                                vector signed char __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszb((vector unsigned char)__a,
-                                (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool char __a,
-                                vector bool char __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszb((vector unsigned char)__a,
-                                (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned char __a,
-                                vector unsigned char __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszb(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed short __a,
-                                vector signed short __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszh((vector unsigned short)__a,
-                                (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool short __a,
-                                vector bool short __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszh((vector unsigned short)__a,
-                                (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned short __a,
-                                vector unsigned short __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszh(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed int __a,
-                                vector signed int __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszf((vector unsigned int)__a,
-                                (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool int __a,
-                                vector bool int __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszf((vector unsigned int)__a,
-                                (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned int __a,
-                                vector unsigned int __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszf(__a, __b, __c, __cc);
-}
-
-#endif
-
-#undef __constant_pow2_range
-#undef __constant_range
-#undef __constant
-#undef __ATTRS_o
-#undef __ATTRS_o_ai
-#undef __ATTRS_ai
-
-#else
-
-#error "Use -fzvector to enable vector extensions"
-
-#endif
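The string-search intrinsics removed above go away with the 11.0.1 copy of vecintrin.h. As a rough usage sketch of vec_search_string_cc, assuming a target built with -fzvector and -march=arch13 (the intrinsic is guarded by __ARCH__ >= 13), and assuming the VSTRS convention that byte element 7 of the length operand holds the needle length, byte element 7 of the result holds the match index, and condition code 2 signals a full match; find_sub and its parameters are illustrative, not part of the header:

#include <vecintrin.h>

/* Hypothetical helper: search for `needle` (first `len` bytes) inside the
   16-byte `haystack`. Returns the match index, or -1 if no full match. */
static int find_sub(vector unsigned char haystack, vector unsigned char needle,
                    unsigned char len) {
  int cc;
  vector unsigned char lenv = vec_splats((unsigned char)0);
  lenv[7] = len; /* needle length travels in byte element 7 */
  vector unsigned char idx =
      vec_search_string_cc(haystack, needle, lenv, &cc);
  return (cc == 2) ? idx[7] : -1; /* cc == 2: full match found */
}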
diff --git a/darwin-x86/lib64/clang/11.0.1/include/x86intrin.h b/darwin-x86/lib64/clang/11.0.1/include/x86intrin.h
deleted file mode 100644
index a8b3662..0000000
--- a/darwin-x86/lib64/clang/11.0.1/include/x86intrin.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86INTRIN_H
-#define __X86INTRIN_H
-
-#include <ia32intrin.h>
-
-#include <immintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__)
-#include <mm3dnow.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
-#include <prfchwintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__)
-#include <ammintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__)
-#include <fma4intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__)
-#include <xopintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__)
-#include <tbmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LWP__)
-#include <lwpintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
-#include <mwaitxintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLZERO__)
-#include <clzerointrin.h>
-#endif
-
-
-#endif /* __X86INTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_builtin_vars.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_builtin_vars.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
similarity index 94%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
index 834a2e3..8ba1826 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
@@ -12,7 +12,9 @@
 #error "This file is for CUDA compilation only."
 #endif
 
+#ifndef __OPENMP_NVPTX__
 #include <limits>
+#endif
 
 // CUDA lets us use various std math functions on the device side.  This file
 // works in concert with __clang_cuda_math_forward_declares.h to make this work.
@@ -30,32 +32,16 @@
 // implementation.  Declaring in the global namespace and pulling into namespace
 // std covers all of the known knowns.
 
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
 #else
 #define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
 #endif
 
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
 __DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
 __DEVICE__ long abs(long __n) { return ::labs(__n); }
 __DEVICE__ float abs(float __x) { return ::fabsf(__x); }
 __DEVICE__ double abs(double __x) { return ::fabs(__x); }
-#endif
-// TODO: remove once variat is supported.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const float abs(const float __x) { return ::fabsf((float)__x); }
-__DEVICE__ const double abs(const double __x) { return ::fabs((double)__x); }
-#endif
 __DEVICE__ float acos(float __x) { return ::acosf(__x); }
 __DEVICE__ float asin(float __x) { return ::asinf(__x); }
 __DEVICE__ float atan(float __x) { return ::atanf(__x); }
@@ -64,11 +50,9 @@
 __DEVICE__ float cos(float __x) { return ::cosf(__x); }
 __DEVICE__ float cosh(float __x) { return ::coshf(__x); }
 __DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) __NOEXCEPT { return ::fabsf(__x); }
+__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
 __DEVICE__ float floor(float __x) { return ::floorf(__x); }
 __DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
-// TODO: remove when variant is supported
-#ifndef _OPENMP
 __DEVICE__ int fpclassify(float __x) {
   return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
                               FP_ZERO, __x);
@@ -77,14 +61,15 @@
   return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
                               FP_ZERO, __x);
 }
-#endif
 __DEVICE__ float frexp(float __arg, int *__exp) {
   return ::frexpf(__arg, __exp);
 }
 
 // For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows.
-#ifndef _MSC_VER
+// Windows. For OpenMP we omit these as some old system headers have
+// non-conforming `isinf(float)` and `isnan(float)` implementations that return
+// an `int`. The system versions of these functions should be fine anyway.
+#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
 __DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
 __DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
 __DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -161,6 +146,8 @@
 // libdevice doesn't provide an implementation, and we don't want to be in the
 // business of implementing tricky libm functions in this header.
 
+#ifndef __OPENMP_NVPTX__
+
 // Now we've defined everything we promised we'd define in
 // __clang_cuda_math_forward_declares.h.  We need to do two additional things to
 // fix up our math functions.
@@ -457,10 +444,7 @@
 using ::remquof;
 using ::rintf;
 using ::roundf;
-// TODO: remove once variant is supported
-#ifndef _OPENMP
 using ::scalblnf;
-#endif
 using ::scalbnf;
 using ::sinf;
 using ::sinhf;
@@ -479,7 +463,8 @@
 } // namespace std
 #endif
 
-#undef __NOEXCEPT
+#endif // __OPENMP_NVPTX__
+
 #undef __DEVICE__
 
 #endif
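Given the hunks above, under __OPENMP_NVPTX__ these wrappers become the device-side math entry points for OpenMP offloading. A minimal usage sketch, assuming clang is invoked with -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda (the mode in which __OPENMP_NVPTX__ is defined and this header is injected); device_hypot is an illustrative name, not part of the header:

#include <math.h>

/* Inside the target region, sqrt resolves through the __DEVICE__ wrappers
   above to libdevice (__nv_sqrt) instead of the host libm. */
double device_hypot(double x, double y) {
  double r;
#pragma omp target map(from : r)
  r = sqrt(x * x + y * y);
  return r;
}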
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h
new file mode 100644
index 0000000..8c10ff6
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h
@@ -0,0 +1,259 @@
+/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_COMPLEX_BUILTINS
+#define __CLANG_CUDA_COMPLEX_BUILTINS
+
+// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3.  These are
+// libgcc functions that clang assumes are available when compiling c99 complex
+// operations.  (These implementations come from libc++, and have been modified
+// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
+
+#pragma push_macro("__DEVICE__")
+#ifdef _OPENMP
+#pragma omp declare target
+#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
+#else
+#define __DEVICE__ __device__ inline
+#endif
+
+// To make the algorithms available for C and C++ in CUDA and OpenMP we select
+// different but equivalent function versions. TODO: For OpenMP we currently
+// select the native builtins as the overload support for templates is lacking.
+#if !defined(_OPENMP)
+#define _ISNANd std::isnan
+#define _ISNANf std::isnan
+#define _ISINFd std::isinf
+#define _ISINFf std::isinf
+#define _ISFINITEd std::isfinite
+#define _ISFINITEf std::isfinite
+#define _COPYSIGNd std::copysign
+#define _COPYSIGNf std::copysign
+#define _SCALBNd std::scalbn
+#define _SCALBNf std::scalbn
+#define _ABSd std::abs
+#define _ABSf std::abs
+#define _LOGBd std::logb
+#define _LOGBf std::logb
+#else
+#define _ISNANd __nv_isnand
+#define _ISNANf __nv_isnanf
+#define _ISINFd __nv_isinfd
+#define _ISINFf __nv_isinff
+#define _ISFINITEd __nv_isfinited
+#define _ISFINITEf __nv_finitef
+#define _COPYSIGNd __nv_copysign
+#define _COPYSIGNf __nv_copysignf
+#define _SCALBNd __nv_scalbn
+#define _SCALBNf __nv_scalbnf
+#define _ABSd __nv_fabs
+#define _ABSf __nv_fabsf
+#define _LOGBd __nv_logb
+#define _LOGBf __nv_logbf
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,
+                                    double __d) {
+  double __ac = __a * __c;
+  double __bd = __b * __d;
+  double __ad = __a * __d;
+  double __bc = __b * __c;
+  double _Complex z;
+  __real__(z) = __ac - __bd;
+  __imag__(z) = __ad + __bc;
+  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+    int __recalc = 0;
+    if (_ISINFd(__a) || _ISINFd(__b)) {
+      __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);
+      if (_ISNANd(__c))
+        __c = _COPYSIGNd(0, __c);
+      if (_ISNANd(__d))
+        __d = _COPYSIGNd(0, __d);
+      __recalc = 1;
+    }
+    if (_ISINFd(__c) || _ISINFd(__d)) {
+      __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);
+      if (_ISNANd(__a))
+        __a = _COPYSIGNd(0, __a);
+      if (_ISNANd(__b))
+        __b = _COPYSIGNd(0, __b);
+      __recalc = 1;
+    }
+    if (!__recalc &&
+        (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) || _ISINFd(__bc))) {
+      if (_ISNANd(__a))
+        __a = _COPYSIGNd(0, __a);
+      if (_ISNANd(__b))
+        __b = _COPYSIGNd(0, __b);
+      if (_ISNANd(__c))
+        __c = _COPYSIGNd(0, __c);
+      if (_ISNANd(__d))
+        __d = _COPYSIGNd(0, __d);
+      __recalc = 1;
+    }
+    if (__recalc) {
+      // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
+      // a device overload (and isn't constexpr before C++11, naturally).
+      __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
+      __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {
+  float __ac = __a * __c;
+  float __bd = __b * __d;
+  float __ad = __a * __d;
+  float __bc = __b * __c;
+  float _Complex z;
+  __real__(z) = __ac - __bd;
+  __imag__(z) = __ad + __bc;
+  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+    int __recalc = 0;
+    if (_ISINFf(__a) || _ISINFf(__b)) {
+      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+      if (_ISNANf(__c))
+        __c = _COPYSIGNf(0, __c);
+      if (_ISNANf(__d))
+        __d = _COPYSIGNf(0, __d);
+      __recalc = 1;
+    }
+    if (_ISINFf(__c) || _ISINFf(__d)) {
+      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+      if (_ISNANf(__a))
+        __a = _COPYSIGNf(0, __a);
+      if (_ISNANf(__b))
+        __b = _COPYSIGNf(0, __b);
+      __recalc = 1;
+    }
+    if (!__recalc &&
+        (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) || _ISINFf(__bc))) {
+      if (_ISNANf(__a))
+        __a = _COPYSIGNf(0, __a);
+      if (_ISNANf(__b))
+        __b = _COPYSIGNf(0, __b);
+      if (_ISNANf(__c))
+        __c = _COPYSIGNf(0, __c);
+      if (_ISNANf(__d))
+        __d = _COPYSIGNf(0, __d);
+      __recalc = 1;
+    }
+    if (__recalc) {
+      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
+      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
+                                    double __d) {
+  int __ilogbw = 0;
+  // Can't use std::max, because that's defined in <algorithm>, and we don't
+  // want to pull that in for every compile.  The CUDA headers define
+  // ::max(float, float) and ::max(double, double), which is sufficient for us.
+  double __logbw = _LOGBd(max(_ABSd(__c), _ABSd(__d)));
+  if (_ISFINITEd(__logbw)) {
+    __ilogbw = (int)__logbw;
+    __c = _SCALBNd(__c, -__ilogbw);
+    __d = _SCALBNd(__d, -__ilogbw);
+  }
+  double __denom = __c * __c + __d * __d;
+  double _Complex z;
+  __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);
+  __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);
+  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+    if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {
+      __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;
+      __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;
+    } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&
+               _ISFINITEd(__d)) {
+      __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);
+      __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);
+      __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
+      __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
+    } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&
+               _ISFINITEd(__b)) {
+      __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);
+      __d = _COPYSIGNd(_ISINFd(__d) ? 1.0 : 0.0, __d);
+      __real__(z) = 0.0 * (__a * __c + __b * __d);
+      __imag__(z) = 0.0 * (__b * __c - __a * __d);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
+  int __ilogbw = 0;
+  float __logbw = _LOGBf(max(_ABSf(__c), _ABSf(__d)));
+  if (_ISFINITEf(__logbw)) {
+    __ilogbw = (int)__logbw;
+    __c = _SCALBNf(__c, -__ilogbw);
+    __d = _SCALBNf(__d, -__ilogbw);
+  }
+  float __denom = __c * __c + __d * __d;
+  float _Complex z;
+  __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw);
+  __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw);
+  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+    if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) {
+      __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a;
+      __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b;
+    } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) &&
+               _ISFINITEf(__d)) {
+      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
+      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
+    } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) &&
+               _ISFINITEf(__b)) {
+      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+      __real__(z) = 0 * (__a * __c + __b * __d);
+      __imag__(z) = 0 * (__b * __c - __a * __d);
+    }
+  }
+  return z;
+}
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+
+#undef _ISNANd
+#undef _ISNANf
+#undef _ISINFd
+#undef _ISINFf
+#undef _COPYSIGNd
+#undef _COPYSIGNf
+#undef _ISFINITEd
+#undef _ISFINITEf
+#undef _SCALBNd
+#undef _SCALBNf
+#undef _ABSd
+#undef _ABSf
+#undef _LOGBd
+#undef _LOGBf
+
+#ifdef _OPENMP
+#pragma omp end declare target
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
+#endif // __CLANG_CUDA_COMPLEX_BUILTINS
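Why device-side definitions of these helpers matter: for C99 Annex G semantics, clang lowers _Complex multiplication and division to the libgcc helpers (unless fast-math relaxations let it inline the arithmetic), so an ordinary operator in device code becomes a call into this header. A minimal sketch, assuming CUDA compilation; mul_div is an illustrative name:

/* Both operators below typically lower to calls that now resolve to the
   __device__ definitions in this header. */
__device__ float _Complex mul_div(float _Complex x, float _Complex y) {
  float _Complex p = x * y; /* -> __mulsc3 */
  return p / y;             /* -> __divsc3 */
}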
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
similarity index 78%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
index 50ad674..f801e54 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
@@ -10,7 +10,7 @@
 #ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
 #define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
 
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
 #if CUDA_VERSION < 9000
 #error This file is intended to be used with CUDA-9+ only.
 #endif
@@ -20,32 +20,12 @@
 // we implement in this file. We need static in order to avoid emitting unused
 // functions and __forceinline__ helps inlining these wrappers at -O1.
 #pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
 #else
 #define __DEVICE__ static __device__ __forceinline__
 #endif
 
-// libdevice provides fast low precision and slow full-recision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
 __DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
 __DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
 __DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
@@ -359,10 +339,10 @@
   return __nvvm_atom_add_gen_i(__p, __v);
 }
 __DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {
-  __nvvm_atom_cta_add_gen_i(__p, __v);
+  return __nvvm_atom_cta_add_gen_i(__p, __v);
 }
 __DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {
-  __nvvm_atom_sys_add_gen_i(__p, __v);
+  return __nvvm_atom_sys_add_gen_i(__p, __v);
 }
 __DEVICE__ int __iAtomicAnd(int *__p, int __v) {
   return __nvvm_atom_and_gen_i(__p, __v);
@@ -1483,152 +1463,17 @@
   return r;
 }
 #endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-#ifndef _OPENMP
-__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
+
+// For OpenMP we require the user to include <time.h> as we need to know what
+// clock_t is on the system.
+#ifndef __OPENMP_NVPTX__
+__DEVICE__ /* clock_t= */ int clock() { return __nvvm_read_ptx_sreg_clock(); }
+#endif
 __DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
-#endif
-__DEVICE__ double copysign(double __a, double __b) {
-  return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
-  return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
-  return __nv_fast_fdividef(__a, __b);
-#else
-  return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
-  return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
-  return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); };
-#else
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); };
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
-  return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
-  return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
-#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+
 // These functions shouldn't be declared when including this header
 // for math function resolution purposes.
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
 __DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
   return __builtin_memcpy(__a, __b, __c);
 }
@@ -1636,158 +1481,6 @@
   return __builtin_memset(__a, __b, __c);
 }
 #endif
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
-  return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
-  return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
-  return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
-  return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
-  return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
-  return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
-  return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
-  return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
-  return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
-  return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
-  return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
-  return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
-  return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
-  return __nv_rhypotf(__a, __b);
-}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
-  return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
-  return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
-  return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
-  return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
-  return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
-  return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-// TODO: remove once variant is supported
-#ifndef _OPENMP
-__DEVICE__ double scalbln(double __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.0 : -0.0;
-  return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.f : -0.f;
-  return scalbnf(__a, (int)__b);
-}
-#endif
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE__ void sincos(double __a, double *__s, double *__c) {
-  return __nv_sincos(__a, __s, __c);
-}
-__DEVICE__ void sincosf(float __a, float *__s, float *__c) {
-  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE__ void sincospi(double __a, double *__s, double *__c) {
-  return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE__ void sincospif(float __a, float *__s, float *__c) {
-  return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
-  return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
-  return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
 
-#undef __NOEXCEPT
 #pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__FAST_OR_SLOW")
 #endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
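One functional fix worth noting in the hunks above: __iAtomicAdd_block and __iAtomicAdd_system previously discarded the result of the underlying __nvvm builtin (falling off the end of a non-void function), so the returned old value was garbage. A sketch of code that depends on the fix, assuming CUDA compilation; fill_slots is illustrative, and user code normally reaches these wrappers via atomicAdd_block:

__global__ void fill_slots(int *counter, int *slots) {
  /* Requires the wrapper to return the pre-increment value, which only
     holds once the missing `return` above is in place. */
  int old = __iAtomicAdd_block(counter, 1);
  slots[old] = (int)threadIdx.x;
}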
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_intrinsics.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_intrinsics.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
index 4d70353..6173b58 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
@@ -14,7 +14,7 @@
 extern "C" {
 #endif
 
-#if defined(_OPENMP)
+#if defined(__OPENMP_NVPTX__)
 #define __DEVICE__
 #elif defined(__CUDA__)
 #define __DEVICE__ __device__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
new file mode 100644
index 0000000..332e616
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
@@ -0,0 +1,347 @@
+/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_MATH_H__
+#define __CLANG_CUDA_MATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+#ifndef __OPENMP_NVPTX__
+#if CUDA_VERSION < 9000
+#error This file is intended to be used with CUDA-9+ only.
+#endif
+#endif
+
+// __DEVICE__ is a helper macro with common set of attributes for the wrappers
+// we implement in this file. We need static in order to avoid emitting unused
+// functions and __forceinline__ helps inlining these wrappers at -O1.
+#pragma push_macro("__DEVICE__")
+#ifdef __OPENMP_NVPTX__
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+#else
+#define __DEVICE__ static __device__ __forceinline__
+#endif
+
+// Specialized version of __DEVICE__ for functions with void return type. Needed
+// because the OpenMP overlay requires constexpr functions here, but prior to
+// C++14 void-returning functions could not be constexpr.
+#pragma push_macro("__DEVICE_VOID__")
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
+#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE_VOID__ __DEVICE__
+#endif
+
+// libdevice provides fast low precision and slow full-precision implementations
+// for some functions. Which one gets selected depends on
+// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
+// -ffast-math or -fcuda-approx-transcendentals are in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
+
+__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
+__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
+__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
+__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
+__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
+__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
+__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
+__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
+__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
+__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
+__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
+__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
+__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
+__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
+__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
+__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
+__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
+__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
+__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+__DEVICE__ double copysign(double __a, double __b) {
+  return __nv_copysign(__a, __b);
+}
+__DEVICE__ float copysignf(float __a, float __b) {
+  return __nv_copysignf(__a, __b);
+}
+__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
+__DEVICE__ float cosf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
+}
+__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
+__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
+__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
+__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
+__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
+__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
+__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
+__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
+__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
+__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
+__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
+__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
+__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
+__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
+__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
+__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
+__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
+__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
+__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
+__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
+__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
+__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
+__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
+__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
+__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
+__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
+__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
+__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
+__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
+__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
+__DEVICE__ float fdividef(float __a, float __b) {
+#if __FAST_MATH__ && !__CUDA_PREC_DIV
+  return __nv_fast_fdividef(__a, __b);
+#else
+  return __a / __b;
+#endif
+}
+__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
+__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
+__DEVICE__ double fma(double __a, double __b, double __c) {
+  return __nv_fma(__a, __b, __c);
+}
+__DEVICE__ float fmaf(float __a, float __b, float __c) {
+  return __nv_fmaf(__a, __b, __c);
+}
+__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
+__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
+__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
+__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
+__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
+__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
+__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
+__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
+__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
+__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
+__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
+__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
+__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
+__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
+__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
+__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
+__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
+__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long labs(long __a) { return __nv_llabs(__a); }
+#else
+__DEVICE__ long labs(long __a) { return __nv_abs(__a); }
+#endif
+__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
+__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
+__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
+__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
+__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llmax(long long __a, long long __b) {
+  return __nv_llmax(__a, __b);
+}
+__DEVICE__ long long llmin(long long __a, long long __b) {
+  return __nv_llmin(__a, __b);
+}
+__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
+__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
+__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
+__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
+__DEVICE__ double log(double __a) { return __nv_log(__a); }
+__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
+__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
+__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
+__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
+__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
+__DEVICE__ float log2f(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
+}
+__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
+__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
+__DEVICE__ float logf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
+}
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long lrint(double __a) { return llrint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
+__DEVICE__ long lround(double __a) { return llround(__a); }
+__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
+#else
+__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
+__DEVICE__ long lround(double __a) { return round(__a); }
+__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+#endif
+__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
+__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
+__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
+__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nextafter(double __a, double __b) {
+  return __nv_nextafter(__a, __b);
+}
+__DEVICE__ float nextafterf(float __a, float __b) {
+  return __nv_nextafterf(__a, __b);
+}
+__DEVICE__ double norm(int __dim, const double *__t) {
+  return __nv_norm(__dim, __t);
+}
+__DEVICE__ double norm3d(double __a, double __b, double __c) {
+  return __nv_norm3d(__a, __b, __c);
+}
+__DEVICE__ float norm3df(float __a, float __b, float __c) {
+  return __nv_norm3df(__a, __b, __c);
+}
+__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
+  return __nv_norm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
+  return __nv_norm4df(__a, __b, __c, __d);
+}
+__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
+__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
+__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
+__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
+__DEVICE__ float normf(int __dim, const float *__t) {
+  return __nv_normf(__dim, __t);
+}
+__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
+__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
+__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
+__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
+__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
+__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
+__DEVICE__ double remainder(double __a, double __b) {
+  return __nv_remainder(__a, __b);
+}
+__DEVICE__ float remainderf(float __a, float __b) {
+  return __nv_remainderf(__a, __b);
+}
+__DEVICE__ double remquo(double __a, double __b, int *__c) {
+  return __nv_remquo(__a, __b, __c);
+}
+__DEVICE__ float remquof(float __a, float __b, int *__c) {
+  return __nv_remquof(__a, __b, __c);
+}
+__DEVICE__ double rhypot(double __a, double __b) {
+  return __nv_rhypot(__a, __b);
+}
+__DEVICE__ float rhypotf(float __a, float __b) {
+  return __nv_rhypotf(__a, __b);
+}
+__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+__DEVICE__ double rnorm(int __a, const double *__b) {
+  return __nv_rnorm(__a, __b);
+}
+__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
+  return __nv_rnorm3d(__a, __b, __c);
+}
+__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
+  return __nv_rnorm3df(__a, __b, __c);
+}
+__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
+  return __nv_rnorm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
+  return __nv_rnorm4df(__a, __b, __c, __d);
+}
+__DEVICE__ float rnormf(int __dim, const float *__t) {
+  return __nv_rnormf(__dim, __t);
+}
+__DEVICE__ double round(double __a) { return __nv_round(__a); }
+__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
+__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
+__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
+__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
+__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
+__DEVICE__ double scalbln(double __a, long __b) {
+  if (__b > INT_MAX)
+    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
+  if (__b < INT_MIN)
+    return __a > 0 ? 0.0 : -0.0;
+  return scalbn(__a, (int)__b);
+}
+__DEVICE__ float scalblnf(float __a, long __b) {
+  if (__b > INT_MAX)
+    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
+  if (__b < INT_MIN)
+    return __a > 0 ? 0.f : -0.f;
+  return scalbnf(__a, (int)__b);
+}
+__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
+__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
+  return __nv_sincos(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
+  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
+  return __nv_sincospi(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
+  return __nv_sincospif(__a, __s, __c);
+}
+__DEVICE__ float sinf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
+}
+__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
+__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
+__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
+__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
+__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
+__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
+__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
+__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
+__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
+__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
+__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
+__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
+__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
+__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
+__DEVICE__ unsigned long long ullmax(unsigned long long __a,
+                                     unsigned long long __b) {
+  return __nv_ullmax(__a, __b);
+}
+__DEVICE__ unsigned long long ullmin(unsigned long long __a,
+                                     unsigned long long __b) {
+  return __nv_ullmin(__a, __b);
+}
+__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
+  return __nv_umax(__a, __b);
+}
+__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
+  return __nv_umin(__a, __b);
+}
+__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
+__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
+__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
+__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
+__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
+__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__DEVICE_VOID__")
+#pragma pop_macro("__FAST_OR_SLOW")
+
+#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
similarity index 87%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
index 0afe4db..8a27085 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
@@ -8,8 +8,8 @@
  */
 #ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
 #define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
+#if !defined(__CUDA__) && !__HIP__
+#error "This file is for CUDA/HIP compilation only."
 #endif
 
 // This file forward-declares some of the math functions we (or the CUDA headers)
@@ -20,37 +20,14 @@
 // would preclude the use of our own __device__ overloads for these functions.
 
 #pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __inline__ __attribute__((always_inline))
-#else
 #define __DEVICE__                                                             \
   static __inline__ __attribute__((always_inline)) __attribute__((device))
-#endif
 
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
 __DEVICE__ long abs(long);
 __DEVICE__ long long abs(long long);
 __DEVICE__ double abs(double);
 __DEVICE__ float abs(float);
-#endif
-// While providing the CUDA declarations and definitions for math functions,
-// we may manually define additional functions.
-// TODO: Once variant is supported the additional functions will have
-// to be removed.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const double abs(const double);
-__DEVICE__ const float abs(const float);
-#endif
-__DEVICE__ int abs(int) __NOEXCEPT;
+__DEVICE__ int abs(int);
 __DEVICE__ double acos(double);
 __DEVICE__ float acos(float);
 __DEVICE__ double acosh(double);
@@ -85,8 +62,8 @@
 __DEVICE__ float exp(float);
 __DEVICE__ double expm1(double);
 __DEVICE__ float expm1(float);
-__DEVICE__ double fabs(double) __NOEXCEPT;
-__DEVICE__ float fabs(float) __NOEXCEPT;
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
 __DEVICE__ double fdim(double, double);
 __DEVICE__ float fdim(float, float);
 __DEVICE__ double floor(double);
@@ -136,12 +113,12 @@
 __DEVICE__ bool isnormal(float);
 __DEVICE__ bool isunordered(double, double);
 __DEVICE__ bool isunordered(float, float);
-__DEVICE__ long labs(long) __NOEXCEPT;
+__DEVICE__ long labs(long);
 __DEVICE__ double ldexp(double, int);
 __DEVICE__ float ldexp(float, int);
 __DEVICE__ double lgamma(double);
 __DEVICE__ float lgamma(float);
-__DEVICE__ long long llabs(long long) __NOEXCEPT;
+__DEVICE__ long long llabs(long long);
 __DEVICE__ long long llrint(double);
 __DEVICE__ long long llrint(float);
 __DEVICE__ double log10(double);
@@ -152,9 +129,6 @@
 __DEVICE__ float log2(float);
 __DEVICE__ double logb(double);
 __DEVICE__ float logb(float);
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ long double log(long double);
-#endif
 __DEVICE__ double log(double);
 __DEVICE__ float log(float);
 __DEVICE__ long lrint(double);
@@ -302,7 +276,6 @@
 } // namespace std
 #endif
 
-#undef __NOEXCEPT
 #pragma pop_macro("__DEVICE__")
 
 #endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
similarity index 96%
rename from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
rename to darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
index e91de3c..f43ed55 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
@@ -31,11 +31,17 @@
 // Include some forward declares that must come before cmath.
 #include <__clang_cuda_math_forward_declares.h>
 
+// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions
+// enabled depend on it to avoid using __float128, which is unsupported in
+// CUDA.
+#define __CUDACC__
+
 // Include some standard headers to avoid CUDA headers including them
 // while some required macros (like __THROW) are in a weird state.
 #include <cmath>
 #include <cstdlib>
 #include <stdlib.h>
+#undef __CUDACC__
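+// Why this dance is needed (a sketch of an assumption about libstdc++
+// internals, not something this header guarantees): with GNU extensions
+// enabled, libstdc++ gates its __float128 support behind conditions like
+// `#if defined(_GLIBCXX_USE_FLOAT128) && !defined(__CUDACC__)`, so keeping
+// __CUDACC__ defined while <cmath>/<cstdlib> are parsed stops them from
+// referencing a type that CUDA device compilation cannot handle.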
 
 // Preserve common macros that will be changed below by us or by CUDA
 // headers.
@@ -83,13 +89,15 @@
 #if CUDA_VERSION < 9000
 #define __CUDABE__
 #else
+#define __CUDACC__
 #define __CUDA_LIBDEVICE__
 #endif
 // Disables definitions of device-side runtime support stubs in
 // cuda_device_runtime_api.h
+#include "host_defines.h"
+#undef __CUDACC__
 #include "driver_types.h"
 #include "host_config.h"
-#include "host_defines.h"
 
 // Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
 // cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
@@ -141,11 +149,12 @@
 // to provide our own.
 #include <__clang_cuda_libdevice_declares.h>
 
-// Wrappers for many device-side standard library functions became compiler
-// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
-// provides its own implementation of the wrappers.
+// Wrappers for many device-side standard library functions, incl. math
+// functions, became compiler builtins in CUDA-9 and have been removed from the
+// CUDA headers. Clang now provides its own implementation of the wrappers.
 #if CUDA_VERSION >= 9000
 #include <__clang_cuda_device_functions.h>
+#include <__clang_cuda_math.h>
 #endif
 
 // __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
new file mode 100644
index 0000000..e1cd49a
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
@@ -0,0 +1,326 @@
+/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+
+extern "C" {
+
+// BEGIN FLOAT
+__device__ __attribute__((const)) float __ocml_acos_f32(float);
+__device__ __attribute__((pure)) float __ocml_acosh_f32(float);
+__device__ __attribute__((const)) float __ocml_asin_f32(float);
+__device__ __attribute__((pure)) float __ocml_asinh_f32(float);
+__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);
+__device__ __attribute__((const)) float __ocml_atan_f32(float);
+__device__ __attribute__((pure)) float __ocml_atanh_f32(float);
+__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_ceil_f32(float);
+__device__ __attribute__((const)) float __ocml_copysign_f32(float, float);
+__device__ float __ocml_cos_f32(float);
+__device__ float __ocml_native_cos_f32(float);
+__device__ __attribute__((pure)) float __ocml_cosh_f32(float);
+__device__ float __ocml_cospi_f32(float);
+__device__ float __ocml_i0_f32(float);
+__device__ float __ocml_i1_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfc_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);
+__device__ __attribute__((pure)) float __ocml_erf_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp2_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_expm1_f32(float);
+__device__ __attribute__((const)) float __ocml_fabs_f32(float);
+__device__ __attribute__((const)) float __ocml_fdim_f32(float, float);
+__device__ __attribute__((const)) float __ocml_floor_f32(float);
+__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fmax_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmin_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmod_f32(float, float);
+__device__ float __ocml_frexp_f32(float,
+                                  __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_hypot_f32(float, float);
+__device__ __attribute__((const)) int __ocml_ilogb_f32(float);
+__device__ __attribute__((const)) int __ocml_isfinite_f32(float);
+__device__ __attribute__((const)) int __ocml_isinf_f32(float);
+__device__ __attribute__((const)) int __ocml_isnan_f32(float);
+__device__ float __ocml_j0_f32(float);
+__device__ float __ocml_j1_f32(float);
+__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int);
+__device__ float __ocml_lgamma_f32(float);
+__device__ __attribute__((pure)) float __ocml_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_log1p_f32(float);
+__device__ __attribute__((pure)) float __ocml_log2_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log2_f32(float);
+__device__ __attribute__((const)) float __ocml_logb_f32(float);
+__device__ __attribute__((pure)) float __ocml_log_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log_f32(float);
+__device__ float __ocml_modf_f32(float,
+                                 __attribute__((address_space(5))) float *);
+__device__ __attribute__((const)) float __ocml_nearbyint_f32(float);
+__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float);
+__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,
+                                                        float);
+__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
+__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
+__device__ float __ocml_remquo_f32(float, float,
+                                   __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float);
+__device__ __attribute__((const)) float __ocml_rint_f32(float);
+__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float,
+                                                         float);
+__device__ __attribute__((const)) float __ocml_round_f32(float);
+__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_scalb_f32(float, float);
+__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int);
+__device__ __attribute__((const)) int __ocml_signbit_f32(float);
+__device__ float __ocml_sincos_f32(float,
+                                   __attribute__((address_space(5))) float *);
+__device__ float __ocml_sincospi_f32(float,
+                                     __attribute__((address_space(5))) float *);
+__device__ float __ocml_sin_f32(float);
+__device__ float __ocml_native_sin_f32(float);
+__device__ __attribute__((pure)) float __ocml_sinh_f32(float);
+__device__ float __ocml_sinpi_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float);
+__device__ float __ocml_tan_f32(float);
+__device__ __attribute__((pure)) float __ocml_tanh_f32(float);
+__device__ float __ocml_tgamma_f32(float);
+__device__ __attribute__((const)) float __ocml_trunc_f32(float);
+__device__ float __ocml_y0_f32(float);
+__device__ float __ocml_y1_f32(float);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float);
+__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
+
+__device__ __attribute__((const)) float
+__llvm_amdgcn_cos_f32(float) __asm("llvm.amdgcn.cos.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rcp_f32(float) __asm("llvm.amdgcn.rcp.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rsq_f32(float) __asm("llvm.amdgcn.rsq.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_sin_f32(float) __asm("llvm.amdgcn.sin.f32");
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__device__ __attribute__((const)) double __ocml_acos_f64(double);
+__device__ __attribute__((pure)) double __ocml_acosh_f64(double);
+__device__ __attribute__((const)) double __ocml_asin_f64(double);
+__device__ __attribute__((pure)) double __ocml_asinh_f64(double);
+__device__ __attribute__((const)) double __ocml_atan2_f64(double, double);
+__device__ __attribute__((const)) double __ocml_atan_f64(double);
+__device__ __attribute__((pure)) double __ocml_atanh_f64(double);
+__device__ __attribute__((pure)) double __ocml_cbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_ceil_f64(double);
+__device__ __attribute__((const)) double __ocml_copysign_f64(double, double);
+__device__ double __ocml_cos_f64(double);
+__device__ __attribute__((pure)) double __ocml_cosh_f64(double);
+__device__ double __ocml_cospi_f64(double);
+__device__ double __ocml_i0_f64(double);
+__device__ double __ocml_i1_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfc_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcx_f64(double);
+__device__ __attribute__((pure)) double __ocml_erf_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp10_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp2_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp_f64(double);
+__device__ __attribute__((pure)) double __ocml_expm1_f64(double);
+__device__ __attribute__((const)) double __ocml_fabs_f64(double);
+__device__ __attribute__((const)) double __ocml_fdim_f64(double, double);
+__device__ __attribute__((const)) double __ocml_floor_f64(double);
+__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double);
+__device__ __attribute__((const)) double __ocml_fmax_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmin_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmod_f64(double, double);
+__device__ double __ocml_frexp_f64(double,
+                                   __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_hypot_f64(double, double);
+__device__ __attribute__((const)) int __ocml_ilogb_f64(double);
+__device__ __attribute__((const)) int __ocml_isfinite_f64(double);
+__device__ __attribute__((const)) int __ocml_isinf_f64(double);
+__device__ __attribute__((const)) int __ocml_isnan_f64(double);
+__device__ double __ocml_j0_f64(double);
+__device__ double __ocml_j1_f64(double);
+__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int);
+__device__ double __ocml_lgamma_f64(double);
+__device__ __attribute__((pure)) double __ocml_log10_f64(double);
+__device__ __attribute__((pure)) double __ocml_log1p_f64(double);
+__device__ __attribute__((pure)) double __ocml_log2_f64(double);
+__device__ __attribute__((const)) double __ocml_logb_f64(double);
+__device__ __attribute__((pure)) double __ocml_log_f64(double);
+__device__ double __ocml_modf_f64(double,
+                                  __attribute__((address_space(5))) double *);
+__device__ __attribute__((const)) double __ocml_nearbyint_f64(double);
+__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double);
+__device__ __attribute__((const)) double __ocml_len3_f64(double, double,
+                                                         double);
+__device__ __attribute__((const)) double __ocml_len4_f64(double, double, double,
+                                                         double);
+__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
+__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
+__device__ double __ocml_remquo_f64(double, double,
+                                    __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double);
+__device__ __attribute__((const)) double __ocml_rint_f64(double);
+__device__ __attribute__((const)) double __ocml_rlen3_f64(double, double,
+                                                          double);
+__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double,
+                                                          double, double);
+__device__ __attribute__((const)) double __ocml_round_f64(double);
+__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double);
+__device__ __attribute__((const)) double __ocml_scalb_f64(double, double);
+__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int);
+__device__ __attribute__((const)) int __ocml_signbit_f64(double);
+__device__ double __ocml_sincos_f64(double,
+                                    __attribute__((address_space(5))) double *);
+__device__ double
+__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *);
+__device__ double __ocml_sin_f64(double);
+__device__ __attribute__((pure)) double __ocml_sinh_f64(double);
+__device__ double __ocml_sinpi_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_f64(double);
+__device__ double __ocml_tan_f64(double);
+__device__ __attribute__((pure)) double __ocml_tanh_f64(double);
+__device__ double __ocml_tgamma_f64(double);
+__device__ __attribute__((const)) double __ocml_trunc_f64(double);
+__device__ double __ocml_y0_f64(double);
+__device__ double __ocml_y1_f64(double);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);
+__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
+                                                            double);
+__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
+                                                            double);
+__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
+                                                            double);
+__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
+                                                            double);
+
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rcp_f64(double) __asm("llvm.amdgcn.rcp.f64");
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rsq_f64(double) __asm("llvm.amdgcn.rsq.f64");
+
+__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
+__device__ _Float16 __ocml_cos_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,
+                                                          _Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
+__device__ _Float16 __ocml_sin_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+
+typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
+typedef short __2i16 __attribute__((ext_vector_type(2)));
+
+__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
+                                                     float c, bool s);
+__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
+__device__ __2f16 __ocml_cos_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16);
+__device__ __attribute__((const))
+__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+__device__ inline __2f16
+__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+{
+  return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+}
+__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
+__device__ __2f16 __ocml_sin_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+
+} // extern "C"
+
+#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_math.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
new file mode 100644
index 0000000..cf7014b
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
@@ -0,0 +1,1185 @@
+/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#include <algorithm>
+#include <limits.h>
+#include <limits>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#pragma push_macro("__RETURN_TYPE")
+
+// To be consistent with __clang_cuda_math_forward_declares.
+#define __DEVICE__ static __device__
+#define __RETURN_TYPE bool
+
+__DEVICE__
+inline uint64_t __make_mantissa_base8(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '7')
+      __r = (__r * 8u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base10(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 10u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base16(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 16u) + __tmp - '0';
+    else if (__tmp >= 'a' && __tmp <= 'f')
+      __r = (__r * 16u) + __tmp - 'a' + 10;
+    else if (__tmp >= 'A' && __tmp <= 'F')
+      __r = (__r * 16u) + __tmp - 'A' + 10;
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa(const char *__tagp) {
+  if (!__tagp)
+    return 0u;
+
+  if (*__tagp == '0') {
+    ++__tagp;
+
+    if (*__tagp == 'x' || *__tagp == 'X')
+      return __make_mantissa_base16(__tagp + 1); // skip the 'x'/'X'
+    else
+      return __make_mantissa_base8(__tagp);
+  }
+
+  return __make_mantissa_base10(__tagp);
+}
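+// Usage sketch (illustration only, not part of the original header): these
+// helpers implement the C99 nan(tagp) payload convention used by nan() and
+// nanf() below. A "0x"/"0X" prefix selects hex, a bare "0" prefix selects
+// octal, anything else is decimal, and any character outside the base makes
+// the payload 0. So __make_mantissa("0x2a"), __make_mantissa("052") and
+// __make_mantissa("42") all evaluate to 42.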
+
+// BEGIN FLOAT
+__DEVICE__
+inline float abs(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float acosf(float __x) { return __ocml_acos_f32(__x); }
+__DEVICE__
+inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+__DEVICE__
+inline float asinf(float __x) { return __ocml_asin_f32(__x); }
+__DEVICE__
+inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+__DEVICE__
+inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+__DEVICE__
+inline float atanf(float __x) { return __ocml_atan_f32(__x); }
+__DEVICE__
+inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+__DEVICE__
+inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+__DEVICE__
+inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+__DEVICE__
+inline float copysignf(float __x, float __y) {
+  return __ocml_copysign_f32(__x, __y);
+}
+__DEVICE__
+inline float cosf(float __x) { return __ocml_cos_f32(__x); }
+__DEVICE__
+inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
+__DEVICE__
+inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+__DEVICE__
+inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+__DEVICE__
+inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+__DEVICE__
+inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+__DEVICE__
+inline float erff(float __x) { return __ocml_erf_f32(__x); }
+__DEVICE__
+inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+__DEVICE__
+inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+__DEVICE__
+inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+__DEVICE__
+inline float expf(float __x) { return __ocml_exp_f32(__x); }
+__DEVICE__
+inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+__DEVICE__
+inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+__DEVICE__
+inline float fdividef(float __x, float __y) { return __x / __y; }
+__DEVICE__
+inline float floorf(float __x) { return __ocml_floor_f32(__x); }
+__DEVICE__
+inline float fmaf(float __x, float __y, float __z) {
+  return __ocml_fma_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+__DEVICE__
+inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+__DEVICE__
+inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+__DEVICE__
+inline float frexpf(float __x, int *__nptr) {
+  int __tmp;
+  float __r =
+      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+__DEVICE__
+inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
+__DEVICE__
+inline float j0f(float __x) { return __ocml_j0_f32(__x); }
+__DEVICE__
+inline float j1f(float __x) { return __ocml_j1_f32(__x); }
+__DEVICE__
+inline float jnf(int __n,
+                 float __x) { // TODO: we could use Ahmes multiplication
+                              // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case.
+  if (__n == 0)
+    return j0f(__x);
+  if (__n == 1)
+    return j1f(__x);
+
+  float __x0 = j0f(__x);
+  float __x1 = j1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
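+// The loop above is the standard forward recurrence for Bessel functions of
+// the first kind, J(n+1, x) = (2n / x) * J(n, x) - J(n-1, x), seeded with
+// the j0f/j1f base cases; jn, ynf and yn below use the same scheme.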
+__DEVICE__
+inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+__DEVICE__
+inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+__DEVICE__
+inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float log10f(float __x) { return __ocml_log10_f32(__x); }
+__DEVICE__
+inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+__DEVICE__
+inline float log2f(float __x) { return __ocml_log2_f32(__x); }
+__DEVICE__
+inline float logbf(float __x) { return __ocml_logb_f32(__x); }
+__DEVICE__
+inline float logf(float __x) { return __ocml_log_f32(__x); }
+__DEVICE__
+inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float modff(float __x, float *__iptr) {
+  float __tmp;
+  float __r =
+      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__iptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline float nanf(const char *__tagp) {
+  union {
+    float val;
+    struct ieee_float {
+      uint32_t mantissa : 22;
+      uint32_t quiet : 1;
+      uint32_t exponent : 8;
+      uint32_t sign : 1;
+    } bits;
+
+    static_assert(sizeof(float) == sizeof(ieee_float), "");
+  } __tmp;
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+}
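+// Illustration (not part of the original header): the union above assembles
+// a quiet NaN field by field, so e.g. nanf("0x3fffff") yields a NaN whose
+// 22-bit payload field is all ones, with the quiet bit, exponent and sign
+// set separately.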
+__DEVICE__
+inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+__DEVICE__
+inline float nextafterf(float __x, float __y) {
+  return __ocml_nextafter_f32(__x, __y);
+}
+__DEVICE__
+inline float norm3df(float __x, float __y, float __z) {
+  return __ocml_len3_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float norm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_len4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+__DEVICE__
+inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+__DEVICE__
+inline float
+normf(int __dim,
+      const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f32(__r);
+}
+__DEVICE__
+inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+__DEVICE__
+inline float remainderf(float __x, float __y) {
+  return __ocml_remainder_f32(__x, __y);
+}
+__DEVICE__
+inline float remquof(float __x, float __y, int *__quo) {
+  int __tmp;
+  float __r = __ocml_remquo_f32(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline float rhypotf(float __x, float __y) {
+  return __ocml_rhypot_f32(__x, __y);
+}
+__DEVICE__
+inline float rintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline float rnorm3df(float __x, float __y, float __z) {
+  return __ocml_rlen3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+inline float rnorm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_rlen4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float
+rnormf(int __dim,
+       const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f32(__r);
+}
+__DEVICE__
+inline float roundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+__DEVICE__
+inline float scalblnf(float __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+                         : __ocml_scalb_f32(__x, __n);
+}
+__DEVICE__
+inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+__DEVICE__
+inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
+__DEVICE__
+inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+
+  *__sinptr =
+      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+
+  *__sinptr = __ocml_sincospi_f32(
+      __x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline float sinf(float __x) { return __ocml_sin_f32(__x); }
+__DEVICE__
+inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+__DEVICE__
+inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+__DEVICE__
+inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+__DEVICE__
+inline float tanf(float __x) { return __ocml_tan_f32(__x); }
+__DEVICE__
+inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+__DEVICE__
+inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+__DEVICE__
+inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
+__DEVICE__
+inline float y0f(float __x) { return __ocml_y0_f32(__x); }
+__DEVICE__
+inline float y1f(float __x) { return __ocml_y1_f32(__x); }
+__DEVICE__
+inline float ynf(int __n,
+                 float __x) { // TODO: we could use Ahmes multiplication
+                              // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case. Placeholder until OCML adds
+  //       support.
+  if (__n == 0)
+    return y0f(__x);
+  if (__n == 1)
+    return y1f(__x);
+
+  float __x0 = y0f(__x);
+  float __x1 = y1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+__DEVICE__
+inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+__DEVICE__
+inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+__DEVICE__
+inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_rd(float __x, float __y) {
+  return __ocml_add_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fadd_rn(float __x, float __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_ru(float __x, float __y) {
+  return __ocml_add_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fadd_rz(float __x, float __y) {
+  return __ocml_add_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rd(float __x, float __y) {
+  return __ocml_div_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fdiv_ru(float __x, float __y) {
+  return __ocml_div_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rz(float __x, float __y) {
+  return __ocml_div_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdividef(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_rd(float __x, float __y, float __z) {
+  return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline float __fmaf_rn(float __x, float __y, float __z) {
+  return __ocml_fma_f32(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_ru(float __x, float __y, float __z) {
+  return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmaf_rz(float __x, float __y, float __z) {
+  return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmul_rd(float __x, float __y) {
+  return __ocml_mul_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fmul_rn(float __x, float __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmul_ru(float __x, float __y) {
+  return __ocml_mul_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fmul_rz(float __x, float __y) {
+  return __ocml_mul_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+__DEVICE__
+inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+#endif
+__DEVICE__
+inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+__DEVICE__
+inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+__DEVICE__
+inline float __fsub_rd(float __x, float __y) {
+  return __ocml_sub_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fsub_rn(float __x, float __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsub_ru(float __x, float __y) {
+  return __ocml_sub_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fsub_rz(float __x, float __y) {
+  return __ocml_sub_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+__DEVICE__
+inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+__DEVICE__
+inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
+__DEVICE__
+inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float __saturatef(float __x) {
+  return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
+}
+__DEVICE__
+inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+  *__sinptr = __ocml_native_sin_f32(__x);
+  *__cosptr = __ocml_native_cos_f32(__x);
+}
+__DEVICE__
+inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+__DEVICE__
+inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__DEVICE__
+inline double abs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double acos(double __x) { return __ocml_acos_f64(__x); }
+__DEVICE__
+inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
+__DEVICE__
+inline double asin(double __x) { return __ocml_asin_f64(__x); }
+__DEVICE__
+inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
+__DEVICE__
+inline double atan(double __x) { return __ocml_atan_f64(__x); }
+__DEVICE__
+inline double atan2(double __x, double __y) {
+  return __ocml_atan2_f64(__x, __y);
+}
+__DEVICE__
+inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
+__DEVICE__
+inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+__DEVICE__
+inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
+__DEVICE__
+inline double copysign(double __x, double __y) {
+  return __ocml_copysign_f64(__x, __y);
+}
+__DEVICE__
+inline double cos(double __x) { return __ocml_cos_f64(__x); }
+__DEVICE__
+inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
+__DEVICE__
+inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+__DEVICE__
+inline double erf(double __x) { return __ocml_erf_f64(__x); }
+__DEVICE__
+inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
+__DEVICE__
+inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+__DEVICE__
+inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+__DEVICE__
+inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+__DEVICE__
+inline double exp(double __x) { return __ocml_exp_f64(__x); }
+__DEVICE__
+inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
+__DEVICE__
+inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
+__DEVICE__
+inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
+__DEVICE__
+inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+__DEVICE__
+inline double floor(double __x) { return __ocml_floor_f64(__x); }
+__DEVICE__
+inline double fma(double __x, double __y, double __z) {
+  return __ocml_fma_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+__DEVICE__
+inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+__DEVICE__
+inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+__DEVICE__
+inline double frexp(double __x, int *__nptr) {
+  int __tmp;
+  double __r =
+      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline double hypot(double __x, double __y) {
+  return __ocml_hypot_f64(__x, __y);
+}
+__DEVICE__
+inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
+__DEVICE__
+inline double j0(double __x) { return __ocml_j0_f64(__x); }
+__DEVICE__
+inline double j1(double __x) { return __ocml_j1_f64(__x); }
+__DEVICE__
+inline double jn(int __n,
+                 double __x) { // TODO: we could use Ahmes multiplication
+                               // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case. Placeholder until OCML adds
+  //       support.
+  if (__n == 0)
+    return j0(__x);
+  if (__n == 1)
+    return j1(__x);
+
+  double __x0 = j0(__x);
+  double __x1 = j1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+__DEVICE__
+inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+__DEVICE__
+inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+__DEVICE__
+inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long long int llround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double log(double __x) { return __ocml_log_f64(__x); }
+__DEVICE__
+inline double log10(double __x) { return __ocml_log10_f64(__x); }
+__DEVICE__
+inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
+__DEVICE__
+inline double log2(double __x) { return __ocml_log2_f64(__x); }
+__DEVICE__
+inline double logb(double __x) { return __ocml_logb_f64(__x); }
+__DEVICE__
+inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long int lround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double modf(double __x, double *__iptr) {
+  double __tmp;
+  double __r =
+      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__iptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline double nan(const char *__tagp) {
+#if !defined(_WIN32)
+  union {
+    double val;
+    struct ieee_double {
+      uint64_t mantissa : 51;
+      uint32_t quiet : 1;
+      uint32_t exponent : 11;
+      uint32_t sign : 1;
+    } bits;
+    static_assert(sizeof(double) == sizeof(ieee_double), "");
+  } __tmp;
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+#else
+  static_assert(sizeof(uint64_t) == sizeof(double), "");
+  uint64_t val = __make_mantissa(__tagp);
+  val |= (uint64_t)0xFFF << 51;
+  return *reinterpret_cast<double *>(&val);
+#endif
+}
+__DEVICE__
+inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+__DEVICE__
+inline double nextafter(double __x, double __y) {
+  return __ocml_nextafter_f64(__x, __y);
+}
+__DEVICE__
+inline double
+norm(int __dim,
+     const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f64(__r);
+}
+__DEVICE__
+inline double norm3d(double __x, double __y, double __z) {
+  return __ocml_len3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double norm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_len4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+__DEVICE__
+inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+__DEVICE__
+inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+__DEVICE__
+inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+__DEVICE__
+inline double remainder(double __x, double __y) {
+  return __ocml_remainder_f64(__x, __y);
+}
+__DEVICE__
+inline double remquo(double __x, double __y, int *__quo) {
+  int __tmp;
+  double __r = __ocml_remquo_f64(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline double rhypot(double __x, double __y) {
+  return __ocml_rhypot_f64(__x, __y);
+}
+__DEVICE__
+inline double rint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline double
+rnorm(int __dim,
+      const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f64(__r);
+}
+__DEVICE__
+inline double rnorm3d(double __x, double __y, double __z) {
+  return __ocml_rlen3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double rnorm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_rlen4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double round(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+__DEVICE__
+inline double scalbln(double __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+                         : __ocml_scalb_f64(__x, __n);
+}
+__DEVICE__
+inline double scalbn(double __x, int __n) {
+  return __ocml_scalbn_f64(__x, __n);
+}
+__DEVICE__
+inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
+__DEVICE__
+inline double sin(double __x) { return __ocml_sin_f64(__x); }
+__DEVICE__
+inline void sincos(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincos_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincospi_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
+__DEVICE__
+inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+__DEVICE__
+inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+__DEVICE__
+inline double tan(double __x) { return __ocml_tan_f64(__x); }
+__DEVICE__
+inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
+__DEVICE__
+inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+__DEVICE__
+inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
+__DEVICE__
+inline double y0(double __x) { return __ocml_y0_f64(__x); }
+__DEVICE__
+inline double y1(double __x) { return __ocml_y1_f64(__x); }
+__DEVICE__
+inline double yn(int __n,
+                 double __x) { // TODO: we could use Ahmes multiplication
+                               // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case. Placeholder until OCML adds
+  //       support.
+  if (__n == 0)
+    return y0(__x);
+  if (__n == 1)
+    return y1(__x);
+
+  double __x0 = y0(__x);
+  double __x1 = y1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_rd(double __x, double __y) {
+  return __ocml_add_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dadd_rn(double __x, double __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_ru(double __x, double __y) {
+  return __ocml_add_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dadd_rz(double __x, double __y) {
+  return __ocml_add_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rd(double __x, double __y) {
+  return __ocml_div_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __ddiv_ru(double __x, double __y) {
+  return __ocml_div_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rz(double __x, double __y) {
+  return __ocml_div_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rd(double __x, double __y) {
+  return __ocml_mul_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dmul_rn(double __x, double __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dmul_ru(double __x, double __y) {
+  return __ocml_mul_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rz(double __x, double __y) {
+  return __ocml_mul_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#endif
+__DEVICE__
+inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+#endif
+__DEVICE__
+inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+__DEVICE__
+inline double __dsub_rd(double __x, double __y) {
+  return __ocml_sub_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dsub_rn(double __x, double __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsub_ru(double __x, double __y) {
+  return __ocml_sub_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dsub_rz(double __x, double __y) {
+  return __ocml_sub_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __fma_rd(double __x, double __y, double __z) {
+  return __ocml_fma_rtn_f64(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline double __fma_rn(double __x, double __y, double __z) {
+  return __ocml_fma_f64(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __fma_ru(double __x, double __y, double __z) {
+  return __ocml_fma_rtp_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double __fma_rz(double __x, double __y, double __z) {
+  return __ocml_fma_rtz_f64(__x, __y, __z);
+}
+#endif
+// END INTRINSICS
+// END DOUBLE
+
+// BEGIN INTEGER
+__DEVICE__
+inline int abs(int __x) {
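+  // Branchless |x|: __sgn is 0 when __x >= 0 and -1 (all bits set) when
+  // __x < 0, so (__x ^ __sgn) - __sgn negates __x exactly when it is
+  // negative. labs and llabs below use the same trick at wider widths.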
+  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long labs(long __x) {
+  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long long llabs(long long __x) {
+  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+
+#if defined(__cplusplus)
+__DEVICE__
+inline long abs(long __x) { return labs(__x); }
+__DEVICE__
+inline long long abs(long long __x) { return llabs(__x); }
+#endif
+// END INTEGER
+
+__DEVICE__
+inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+  return __ocml_fma_f16(__x, __y, __z);
+}
+
+__DEVICE__
+inline float fma(float __x, float __y, float __z) {
+  return fmaf(__x, __y, __z);
+}
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUNI")
+#pragma push_macro("__DEF_FLOAT_FUN2I")
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function which returns __T if __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid a compilation error due to ambiguity, e.g. floor(5) is resolved
+// with floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn)                                         \
+  template <typename __T>                                                      \
+  __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,    \
+                                      __retty>::type                           \
+  __fn(__T __x) {                                                              \
+    return ::__fn((double)__x);                                                \
+  }
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer arguments to avoid a compilation error due to ambiguity, e.g.
+// max(5.0f, 6.0) is resolved with max(double, double).
+#define __HIP_OVERLOAD2(__retty, __fn)                                         \
+  template <typename __T1, typename __T2>                                      \
+  __DEVICE__                                                                   \
+      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
+                                   std::numeric_limits<__T2>::is_specialized,  \
+                               __retty>::type                                  \
+      __fn(__T1 __x, __T2 __y) {                                               \
+    return __fn((double)__x, (double)__y);                                     \
+  }
+
+// Define cmath functions with float argument and returns float.
+#define __DEF_FUN1(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  inline float __func(float __x) { return __func##f(__x); }                    \
+  __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions with float argument and returns __retty.
+#define __DEF_FUNI(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  inline __retty __func(float __x) { return __func##f(__x); }                  \
+  __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions with two float arguments.
+#define __DEF_FUN2(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  inline float __func(float __x, float __y) { return __func##f(__x, __y); }    \
+  __HIP_OVERLOAD2(__retty, __func)
+
+__DEF_FUN1(double, acos)
+__DEF_FUN1(double, acosh)
+__DEF_FUN1(double, asin)
+__DEF_FUN1(double, asinh)
+__DEF_FUN1(double, atan)
+__DEF_FUN2(double, atan2);
+__DEF_FUN1(double, atanh)
+__DEF_FUN1(double, cbrt)
+__DEF_FUN1(double, ceil)
+__DEF_FUN2(double, copysign);
+__DEF_FUN1(double, cos)
+__DEF_FUN1(double, cosh)
+__DEF_FUN1(double, erf)
+__DEF_FUN1(double, erfc)
+__DEF_FUN1(double, exp)
+__DEF_FUN1(double, exp2)
+__DEF_FUN1(double, expm1)
+__DEF_FUN1(double, fabs)
+__DEF_FUN2(double, fdim);
+__DEF_FUN1(double, floor)
+__DEF_FUN2(double, fmax);
+__DEF_FUN2(double, fmin);
+__DEF_FUN2(double, fmod);
+//__HIP_OVERLOAD1(int, fpclassify)
+__DEF_FUN2(double, hypot);
+__DEF_FUNI(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater);
+__HIP_OVERLOAD2(bool, isgreaterequal);
+__HIP_OVERLOAD1(bool, isinf);
+__HIP_OVERLOAD2(bool, isless);
+__HIP_OVERLOAD2(bool, islessequal);
+__HIP_OVERLOAD2(bool, islessgreater);
+__HIP_OVERLOAD1(bool, isnan);
+//__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered);
+__DEF_FUN1(double, lgamma)
+__DEF_FUN1(double, log)
+__DEF_FUN1(double, log10)
+__DEF_FUN1(double, log1p)
+__DEF_FUN1(double, log2)
+__DEF_FUN1(double, logb)
+__DEF_FUNI(long long, llrint)
+__DEF_FUNI(long long, llround)
+__DEF_FUNI(long, lrint)
+__DEF_FUNI(long, lround)
+__DEF_FUN1(double, nearbyint);
+__DEF_FUN2(double, nextafter);
+__DEF_FUN2(double, pow);
+__DEF_FUN2(double, remainder);
+__DEF_FUN1(double, rint);
+__DEF_FUN1(double, round);
+__HIP_OVERLOAD1(bool, signbit)
+__DEF_FUN1(double, sin)
+__DEF_FUN1(double, sinh)
+__DEF_FUN1(double, sqrt)
+__DEF_FUN1(double, tan)
+__DEF_FUN1(double, tanh)
+__DEF_FUN1(double, tgamma)
+__DEF_FUN1(double, trunc);
+
+// Define cmath functions with a float and an integer argument.
+#define __DEF_FLOAT_FUN2I(__func)                                              \
+  __DEVICE__                                                                   \
+  inline float __func(float __x, int __y) { return __func##f(__x, __y); }
+__DEF_FLOAT_FUN2I(scalbn)
+
+template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+
+template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__ inline int min(int __arg1, int __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+__DEVICE__ inline int max(int __arg1, int __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__
+inline float max(float __x, float __y) { return fmaxf(__x, __y); }
+
+__DEVICE__
+inline double max(double __x, double __y) { return fmax(__x, __y); }
+
+__DEVICE__
+inline float min(float __x, float __y) { return fminf(__x, __y); }
+
+__DEVICE__
+inline double min(double __x, double __y) { return fmin(__x, __y); }
+
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+__host__ inline static int min(int __arg1, int __arg2) {
+  return std::min(__arg1, __arg2);
+}
+
+__host__ inline static int max(int __arg1, int __arg2) {
+  return std::max(__arg1, __arg2);
+}
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUNI")
+#pragma pop_macro("__DEF_FLOAT_FUN2I")
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__RETURN_TYPE")
+
+#endif // __CLANG_HIP_MATH_H__
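The __hip_enable_if/__HIP_OVERLOAD machinery above is easier to see in
isolation. Below is a minimal host-only sketch of the same dispatch technique;
hip_enable_if, floor_like, and the main driver are illustrative names, not
part of the header, and std::floor stands in for the device implementation.

    #include <cmath>
    #include <limits>

    template <bool B, class T = void> struct hip_enable_if {};
    template <class T> struct hip_enable_if<true, T> { typedef T type; };

    inline double floor_like(double x) { return std::floor(x); }

    // Mirrors __HIP_OVERLOAD1: this template only participates in overload
    // resolution for integer types, so floor_like(5) is unambiguous and
    // forwards to the double overload.
    template <typename T>
    typename hip_enable_if<std::numeric_limits<T>::is_integer, double>::type
    floor_like(T x) { return floor_like((double)x); }

    int main() { return floor_like(5) == 5.0 ? 0 : 1; }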
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
new file mode 100644
index 0000000..addae56
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
@@ -0,0 +1,64 @@
+/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ */
+
+#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
+#define __CLANG_HIP_RUNTIME_WRAPPER_H__
+
+#if __HIP__
+
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+
+#define __host__ __attribute__((host))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+#if __HIP_ENABLE_DEVICE_MALLOC__
+extern "C" __device__ void *__hip_malloc(size_t __size);
+extern "C" __device__ void *__hip_free(void *__ptr);
+static inline __device__ void *malloc(size_t __size) {
+  return __hip_malloc(__size);
+}
+static inline __device__ void *free(void *__ptr) { return __hip_free(__ptr); }
+#else
+static inline __device__ void *malloc(size_t __size) {
+  __builtin_trap();
+  return nullptr;
+}
+static inline __device__ void *free(void *__ptr) {
+  __builtin_trap();
+  return nullptr;
+}
+#endif
+
+#include <__clang_hip_libdevice_declares.h>
+#include <__clang_hip_math.h>
+
+#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+#include <__clang_cuda_math_forward_declares.h>
+#include <__clang_cuda_complex_builtins.h>
+
+#include <algorithm>
+#include <complex>
+#include <new>
+#endif // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+
+#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
+
+#endif // __HIP__
+#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
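A quick sketch of what the device-side malloc/free plumbing above enables,
assuming a toolchain built with __HIP_ENABLE_DEVICE_MALLOC__ (without it, the
calls trap as defined above); hip/hip_runtime.h and scratch_kernel are
illustrative assumptions, not part of the wrapper.

    #include <hip/hip_runtime.h>

    __global__ void scratch_kernel(int *out) {
      // Routed to __hip_malloc by the wrapper above.
      int *buf = (int *)malloc(4 * sizeof(int));
      if (buf) {
        int sum = 0;
        for (int i = 0; i < 4; ++i) { buf[i] = i; sum += buf[i]; }
        *out = sum;
        free(buf); // routed to __hip_free
      }
    }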
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__stddef_max_align_t.h b/darwin-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/__stddef_max_align_t.h
rename to darwin-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__wmmintrin_aes.h b/darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/__wmmintrin_aes.h
rename to darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__wmmintrin_pclmul.h b/darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/__wmmintrin_pclmul.h
rename to darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/adxintrin.h b/darwin-x86/lib64/clang/11.0.5/include/adxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/adxintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/altivec.h b/darwin-x86/lib64/clang/11.0.5/include/altivec.h
similarity index 97%
rename from darwin-x86/lib64/clang/11.0.1/include/altivec.h
rename to darwin-x86/lib64/clang/11.0.5/include/altivec.h
index 7e231a2..9a40092 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/altivec.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/altivec.h
@@ -16761,6 +16761,394 @@
 static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
   return __builtin_altivec_vminsb(__a, -__a);
 }
+
+#ifdef __POWER10_VECTOR__
+/* vec_pdep */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pdep(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vpdepd(__a, __b);
+}
+
+/* vec_pext */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vpextd(__a, __b);
+}
+
+/* vec_cfuge */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcfuged(__a, __b);
+}
+
+/* vec_gnb */
+
+#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
+
+/* vec_ternarylogic */
+#ifdef __VSX__
+#define vec_ternarylogic(__a, __b, __c, __imm)                                 \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned short                                             \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned int                                               \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned long long                                         \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned __int128                                          \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)))
+#endif /* __VSX__ */
+
+/* vec_genpcvm */
+
+#ifdef __VSX__
+#define vec_genpcvm(__a, __imm)                                                \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)),                    \
+             vector unsigned short                                             \
+           : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)),                    \
+             vector unsigned int                                               \
+           : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)),                    \
+             vector unsigned long long                                         \
+           : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+#endif /* __VSX__ */
+
+/* vec_clrl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrl(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrrb(__a, __n);
+#else
+  return __builtin_altivec_vclrlb(__a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrl(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#else
+  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_clrr */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrr(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrlb(__a, __n);
+#else
+  return __builtin_altivec_vclrrb(__a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrr(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#else
+  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_cntlzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vclzdm(__a, __b);
+}
+
+/* vec_cnttzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vctzdm(__a, __b);
+}
+
+/* vec_sldb */
+
+#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
+
+/* vec_srdb */
+
+#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+
+/* vec_insertl */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsbrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsblx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_insertl(unsigned long long __a, vector unsigned long long __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsdrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsdlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(vector unsigned char __a, vector unsigned char __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(vector unsigned short __a, vector unsigned short __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshvrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(vector unsigned int __a, vector unsigned int __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswvrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswvlx(__b, __c, __a);
+#endif
+}
+
+/* vec_inserth */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsblx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsbrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_inserth(unsigned long long __a, vector unsigned long long __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsdlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsdrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(vector unsigned char __a, vector unsigned char __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(vector unsigned short __a, vector unsigned short __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshvlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(vector unsigned int __a, vector unsigned int __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswvlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswvrx(__b, __c, __a);
+#endif
+}
+
+#ifdef __VSX__
+
+/* vec_permx */
+
+#define vec_permx(__a, __b, __c, __d)                                          \
+  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+
+/* vec_blendv */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_blendv(vector signed char __a, vector signed char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_blendv(vector unsigned char __a, vector unsigned char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_blendv(vector signed short __a, vector signed short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_blendv(vector unsigned short __a, vector unsigned short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_blendv(vector signed int __a, vector signed int __b,
+           vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_blendv(vector unsigned int __a, vector unsigned int __b,
+           vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_blendv(vector signed long long __a, vector signed long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_blendv(vector double __a, vector double __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+/* vec_splati */
+
+#define vec_splati(__a)                                                        \
+  _Generic((__a), signed int                                                   \
+           : ((vector signed int)__a), unsigned int                            \
+           : ((vector unsigned int)__a), float                                 \
+           : ((vector float)__a))
+
+/* vec_splatid */
+
+static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
+  return ((vector double)((double)__a));
+}
+
+/* vec_splati_ins */
+
+static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
+    vector signed int __a, const unsigned int __b, const signed int __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2 + __b] = __c;
+#endif
+  return __a;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
+    vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2 + __b] = __c;
+#endif
+  return __a;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2 + __b] = __c;
+#endif
+  return __a;
+}
+#endif /* __VSX__ */
+#endif /* __POWER10_VECTOR__ */
+
 #undef __ATTRS_o_ai
 
 #endif /* __ALTIVEC_H */
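As a usage sketch for the new Power10 blend intrinsics (illustrative only;
requires -mcpu=power10, and blend_halves is not part of the header): the
underlying xxblendv instructions take each element from the second operand
when the most significant bit of the corresponding mask element is set.

    #include <altivec.h>

    vector unsigned int blend_halves(vector unsigned int a,
                                     vector unsigned int b) {
      /* MSB set in lanes 2 and 3: those lanes come from b, the rest from a. */
      vector unsigned int m = {0u, 0u, 0x80000000u, 0x80000000u};
      return vec_blendv(a, b, m);
    }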
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ammintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ammintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ammintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ammintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/amxintrin.h b/darwin-x86/lib64/clang/11.0.5/include/amxintrin.h
new file mode 100644
index 0000000..58254e2
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/amxintrin.h
@@ -0,0 +1,225 @@
+/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
+#endif /* __IMMINTRIN_H */
+
+#ifndef __AMXINTRIN_H
+#define __AMXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+
+/// Load tile configuration from a 64-byte memory location specified by
+/// "mem_addr". The tile configuration includes the tile type palette, the
+/// number of bytes per row, and the number of rows. If the specified
+/// palette_id is zero, that signifies the init state for both the tile
+/// config and the tile data, and the tiles are zeroed. Any invalid
+/// configuration will result in a #GP fault.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_loadconfig(const void *__config)
+{
+  __builtin_ia32_tile_loadconfig(__config);
+}
+
+/// Stores the current tile configuration to a 64-byte memory location
+/// specified by "mem_addr". The tile configuration includes the tile type
+/// palette, the number of bytes per row, and the number of rows. If tiles
+/// are not configured, all zeroes will be stored to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_storeconfig(void *__config)
+{
+  __builtin_ia32_tile_storeconfig(__config);
+}
+
+/// Release the tile configuration to return to the init state, which
+/// releases all storage it currently holds.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_release(void)
+{
+  __builtin_ia32_tilerelease();
+}
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+#define _tile_loadd(dst, base, stride) \
+  __builtin_ia32_tileloadd64((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+#define _tile_stream_loadd(dst, base, stride) \
+  __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Store the tile specified by "src" to memory specified by "base" address and
+/// "stride" using the tile configuration previously configured via
+/// "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be stored in memory.
+#define _tile_stored(dst, base, stride) \
+  __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Zero the tile specified by "tdest".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
+///
+/// \param tile
+///    The destination tile to be zeroed. Max size is 1024 Bytes.
+#define _tile_zero(tile) __builtin_ia32_tilezero((tile))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbssd(dst, src0, src1) __builtin_ia32_tdpbssd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbsud(dst, src0, src1) __builtin_ia32_tdpbsud((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbusd(dst, src0, src1) __builtin_ia32_tdpbusd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbuud(dst, src0, src1) __builtin_ia32_tdpbuud((dst), (src0), (src1))
+
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbf16ps(dst, src0, src1) \
+  __builtin_ia32_tdpbf16ps((dst), (src0), (src1))
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __x86_64__ */
+#endif /* __AMXINTRIN_H */
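The intrinsics above compose into the usual config/load/compute/store
sequence. A hedged sketch follows, assuming Intel's documented 64-byte
tile-config layout (the tilecfg_sketch struct and dpbssd_16x16 are
assumptions, not defined by this header) and compilation with
-mamx-tile -mamx-int8; the B matrix is assumed pre-packed for TDPBSSD.

    #include <immintrin.h>
    #include <string.h>

    /* Assumed layout of the 64-byte configuration consumed by LDTILECFG. */
    typedef struct {
      unsigned char  palette_id;
      unsigned char  start_row;
      unsigned char  reserved[14];
      unsigned short colsb[16]; /* bytes per row, per tile */
      unsigned char  rows[16];  /* rows, per tile */
    } tilecfg_sketch;

    void dpbssd_16x16(const signed char *a, const signed char *b, int *c) {
      tilecfg_sketch cfg;
      memset(&cfg, 0, sizeof(cfg));
      cfg.palette_id = 1;
      cfg.rows[0] = cfg.rows[1] = cfg.rows[2] = 16;
      cfg.colsb[0] = cfg.colsb[1] = cfg.colsb[2] = 64;
      _tile_loadconfig(&cfg);
      _tile_zero(0);         /* tile 0 accumulates int32 results */
      _tile_loadd(1, a, 64); /* 16x64 int8 */
      _tile_loadd(2, b, 64); /* 16x64 int8, pre-packed for TDPBSSD */
      _tile_dpbssd(0, 1, 2); /* C += A . B over groups of 4 bytes */
      _tile_stored(0, c, 64);
      _tile_release();
    }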
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm64intr.h b/darwin-x86/lib64/clang/11.0.5/include/arm64intr.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/arm64intr.h
rename to darwin-x86/lib64/clang/11.0.5/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_acle.h b/darwin-x86/lib64/clang/11.0.5/include/arm_acle.h
similarity index 98%
rename from darwin-x86/lib64/clang/11.0.1/include/arm_acle.h
rename to darwin-x86/lib64/clang/11.0.5/include/arm_acle.h
index 596ea03..de568b4 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/arm_acle.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/arm_acle.h
@@ -22,31 +22,43 @@
 
 /* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
 /* 8.3 Memory barriers */
-#if !defined(_MSC_VER)
+#if !__has_builtin(__dmb)
 #define __dmb(i) __builtin_arm_dmb(i)
+#endif
+#if !__has_builtin(__dsb)
 #define __dsb(i) __builtin_arm_dsb(i)
+#endif
+#if !__has_builtin(__isb)
 #define __isb(i) __builtin_arm_isb(i)
 #endif
 
 /* 8.4 Hints */
 
-#if !defined(_MSC_VER)
+#if !__has_builtin(__wfi)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
   __builtin_arm_wfi();
 }
+#endif
 
+#if !__has_builtin(__wfe)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
   __builtin_arm_wfe();
 }
+#endif
 
+#if !__has_builtin(__sev)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
   __builtin_arm_sev();
 }
+#endif
 
+#if !__has_builtin(__sevl)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
   __builtin_arm_sevl();
 }
+#endif
 
+#if !__has_builtin(__yield)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
   __builtin_arm_yield();
 }
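For context, a minimal sketch of the barrier usage these guarded definitions
enable (publish and the flag protocol are illustrative, not part of the
header; 0xF is the full-system SY barrier option):

    #include <arm_acle.h>

    void publish(volatile int *flag, int *data) {
      *data = 42;
      __dmb(0xF); /* DMB SY: order the data store before the flag store */
      *flag = 1;
    }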
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_bf16.h b/darwin-x86/lib64/clang/11.0.5/include/arm_bf16.h
new file mode 100644
index 0000000..329ae39
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/arm_bf16.h
@@ -0,0 +1,20 @@
+/*===---- arm_bf16.h - ARM BF16 intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_BF16_H
+#define __ARM_BF16_H
+
+typedef __bf16 bfloat16_t;
+#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
+
+
+#undef __ai
+
+#endif
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_cde.h b/darwin-x86/lib64/clang/11.0.5/include/arm_cde.h
new file mode 100644
index 0000000..4ad5d82
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/arm_cde.h
@@ -0,0 +1,410 @@
+/*===---- arm_cde.h - ARM CDE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_CDE_H
+#define __ARM_CDE_H
+
+#if !__ARM_FEATURE_CDE
+#error "CDE support not enabled"
+#endif
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1)))
+uint32_t __arm_cx1(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1a)))
+uint32_t __arm_cx1a(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1d)))
+uint64_t __arm_cx1d(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1da)))
+uint64_t __arm_cx1da(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2)))
+uint32_t __arm_cx2(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2a)))
+uint32_t __arm_cx2a(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2d)))
+uint64_t __arm_cx2d(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2da)))
+uint64_t __arm_cx2da(int, uint64_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3)))
+uint32_t __arm_cx3(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3a)))
+uint32_t __arm_cx3a(int, uint32_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3d)))
+uint64_t __arm_cx3d(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3da)))
+uint64_t __arm_cx3da(int, uint64_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1_u32)))
+uint32_t __arm_vcx1_u32(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1a_u32)))
+uint32_t __arm_vcx1a_u32(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1d_u64)))
+uint64_t __arm_vcx1d_u64(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1da_u64)))
+uint64_t __arm_vcx1da_u64(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2_u32)))
+uint32_t __arm_vcx2_u32(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2a_u32)))
+uint32_t __arm_vcx2a_u32(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2d_u64)))
+uint64_t __arm_vcx2d_u64(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2da_u64)))
+uint64_t __arm_vcx2da_u64(int, uint64_t, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3_u32)))
+uint32_t __arm_vcx3_u32(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3a_u32)))
+uint32_t __arm_vcx3a_u32(int, uint32_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3d_u64)))
+uint64_t __arm_vcx3d_u64(int, uint64_t, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3da_u64)))
+uint64_t __arm_vcx3da_u64(int, uint64_t, uint64_t, uint64_t, uint32_t);
+
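+/* Illustrative sketch (an assumption, not part of the upstream header): a
+   CX1A accumulate on coprocessor 0, assuming coprocessor 0 is configured for
+   CDE. The coprocessor number and the immediate must be compile-time
+   constants; the computation performed is defined by the custom hardware. */
+static __inline__ uint32_t __arm_cx1a_example(uint32_t __acc) {
+  return __arm_cx1a(0, __acc, 0);
+}
+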
+#if __ARM_FEATURE_MVE
+
+typedef uint16_t mve_pred16_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;
+
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s16)))
+int16x8_t __arm_vcx1q_m(int, int16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s32)))
+int32x4_t __arm_vcx1q_m(int, int32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s64)))
+int64x2_t __arm_vcx1q_m(int, int64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s8)))
+int8x16_t __arm_vcx1q_m(int, int8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u16)))
+uint16x8_t __arm_vcx1q_m(int, uint16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u32)))
+uint32x4_t __arm_vcx1q_m(int, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u64)))
+uint64x2_t __arm_vcx1q_m(int, uint64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u8)))
+uint8x16_t __arm_vcx1q_m(int, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_u8)))
+uint8x16_t __arm_vcx1q_u8(int, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s16)))
+int16x8_t __arm_vcx1qa_m(int, int16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s32)))
+int32x4_t __arm_vcx1qa_m(int, int32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s64)))
+int64x2_t __arm_vcx1qa_m(int, int64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s8)))
+int8x16_t __arm_vcx1qa_m(int, int8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u16)))
+uint16x8_t __arm_vcx1qa_m(int, uint16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u32)))
+uint32x4_t __arm_vcx1qa_m(int, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u64)))
+uint64x2_t __arm_vcx1qa_m(int, uint64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u8)))
+uint8x16_t __arm_vcx1qa_m(int, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s16)))
+int16x8_t __arm_vcx1qa(int, int16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s32)))
+int32x4_t __arm_vcx1qa(int, int32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s64)))
+int64x2_t __arm_vcx1qa(int, int64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s8)))
+int8x16_t __arm_vcx1qa(int, int8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u16)))
+uint16x8_t __arm_vcx1qa(int, uint16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u32)))
+uint32x4_t __arm_vcx1qa(int, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u64)))
+uint64x2_t __arm_vcx1qa(int, uint64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u8)))
+uint8x16_t __arm_vcx1qa(int, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s16)))
+int16x8_t __arm_vcx2q_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s32)))
+int32x4_t __arm_vcx2q_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s64)))
+int64x2_t __arm_vcx2q_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s8)))
+int8x16_t __arm_vcx2q_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u16)))
+uint16x8_t __arm_vcx2q_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u32)))
+uint32x4_t __arm_vcx2q_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u64)))
+uint64x2_t __arm_vcx2q_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u8)))
+uint8x16_t __arm_vcx2q_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s16)))
+int16x8_t __arm_vcx2q(int, int16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s32)))
+int32x4_t __arm_vcx2q(int, int32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s64)))
+int64x2_t __arm_vcx2q(int, int64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s8)))
+int8x16_t __arm_vcx2q(int, int8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u16)))
+uint16x8_t __arm_vcx2q(int, uint16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u32)))
+uint32x4_t __arm_vcx2q(int, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u64)))
+uint64x2_t __arm_vcx2q(int, uint64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8)))
+uint8x16_t __arm_vcx2q(int, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s16)))
+uint8x16_t __arm_vcx2q_u8(int, int16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s32)))
+uint8x16_t __arm_vcx2q_u8(int, int32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s64)))
+uint8x16_t __arm_vcx2q_u8(int, int64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s8)))
+uint8x16_t __arm_vcx2q_u8(int, int8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u16)))
+uint8x16_t __arm_vcx2q_u8(int, uint16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u32)))
+uint8x16_t __arm_vcx2q_u8(int, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u64)))
+uint8x16_t __arm_vcx2q_u8(int, uint64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u8)))
+uint8x16_t __arm_vcx2q_u8(int, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s16)))
+int16x8_t __arm_vcx2qa_impl(int, int16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s32)))
+int32x4_t __arm_vcx2qa_impl(int, int32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s64)))
+int64x2_t __arm_vcx2qa_impl(int, int64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s8)))
+int8x16_t __arm_vcx2qa_impl(int, int8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u16)))
+uint16x8_t __arm_vcx2qa_impl(int, uint16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u32)))
+uint32x4_t __arm_vcx2qa_impl(int, uint32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u64)))
+uint64x2_t __arm_vcx2qa_impl(int, uint64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u8)))
+uint8x16_t __arm_vcx2qa_impl(int, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s16)))
+int16x8_t __arm_vcx2qa_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s32)))
+int32x4_t __arm_vcx2qa_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s64)))
+int64x2_t __arm_vcx2qa_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s8)))
+int8x16_t __arm_vcx2qa_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u16)))
+uint16x8_t __arm_vcx2qa_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u32)))
+uint32x4_t __arm_vcx2qa_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u64)))
+uint64x2_t __arm_vcx2qa_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u8)))
+uint8x16_t __arm_vcx2qa_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s16)))
+int16x8_t __arm_vcx3q_impl(int, int16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s32)))
+int32x4_t __arm_vcx3q_impl(int, int32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s64)))
+int64x2_t __arm_vcx3q_impl(int, int64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s8)))
+int8x16_t __arm_vcx3q_impl(int, int8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u16)))
+uint16x8_t __arm_vcx3q_impl(int, uint16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u32)))
+uint32x4_t __arm_vcx3q_impl(int, uint32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u64)))
+uint64x2_t __arm_vcx3q_impl(int, uint64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u8)))
+uint8x16_t __arm_vcx3q_impl(int, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s16)))
+int16x8_t __arm_vcx3q_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s32)))
+int32x4_t __arm_vcx3q_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s64)))
+int64x2_t __arm_vcx3q_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s8)))
+int8x16_t __arm_vcx3q_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u16)))
+uint16x8_t __arm_vcx3q_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u32)))
+uint32x4_t __arm_vcx3q_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u64)))
+uint64x2_t __arm_vcx3q_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u8)))
+uint8x16_t __arm_vcx3q_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s16)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s32)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s64)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s8)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u16)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u32)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u64)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u8)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s16)))
+int16x8_t __arm_vcx3qa_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s32)))
+int32x4_t __arm_vcx3qa_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s64)))
+int64x2_t __arm_vcx3qa_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s8)))
+int8x16_t __arm_vcx3qa_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u16)))
+uint16x8_t __arm_vcx3qa_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u32)))
+uint32x4_t __arm_vcx3qa_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u64)))
+uint64x2_t __arm_vcx3qa_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u8)))
+uint8x16_t __arm_vcx3qa_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s16)))
+int16x8_t __arm_vcx3qa_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s32)))
+int32x4_t __arm_vcx3qa_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s64)))
+int64x2_t __arm_vcx3qa_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s8)))
+int8x16_t __arm_vcx3qa_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u16)))
+uint16x8_t __arm_vcx3qa_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u32)))
+uint32x4_t __arm_vcx3qa_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u64)))
+uint64x2_t __arm_vcx3qa_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u8)))
+uint8x16_t __arm_vcx3qa_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vreinterpretq_u8_u8)))
+uint8x16_t __arm_vreinterpretq_u8(uint8x16_t);
+#define __arm_vcx2q_m(cp, inactive, n, imm, pred) __arm_vcx2q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), (imm), (pred))
+#define __arm_vcx2qa(cp, acc, n, imm) __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm))
+#define __arm_vcx2qa_m(cp, acc, n, imm, pred) __arm_vcx2qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm), (pred))
+#define __arm_vcx3q(cp, n, m, imm) __arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))
+#define __arm_vcx3q_m(cp, inactive, n, m, imm, pred) __arm_vcx3q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))
+#define __arm_vcx3q_u8(cp, n, m, imm) __arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))
+#define __arm_vcx3qa(cp, acc, n, m, imm) __arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm))
+#define __arm_vcx3qa_m(cp, acc, n, m, imm, pred) __arm_vcx3qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))
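+/* The eight macros above give the CDE intrinsics their polymorphic surface:
+ * each one funnels every general vector operand through the overloaded
+ * __arm_vreinterpretq_u8 and forwards to the matching _impl overload, so a
+ * single uint8x16_t-based builtin serves all lane types.  A minimal usage
+ * sketch (the coprocessor number and immediate are illustrative values, not
+ * taken from this header):
+ *
+ *   uint32x4_t acc;
+ *   int16x8_t n;
+ *   // expands to __arm_vcx2qa_impl(0, acc, __arm_vreinterpretq_u8(n), 1)
+ *   acc = __arm_vcx2qa(0, acc, n, 1);
+ */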
+
+#endif /* __ARM_FEATURE_MVE */
+
+#if __ARM_FEATURE_MVE & 2
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;
+
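+/* A note on the guard above: __ARM_FEATURE_MVE is defined by the compiler as
+ * a two-bit mask, with bit 0 advertising integer MVE and bit 1 the
+ * floating-point extension, so the `& 2' test gates the float16/float32
+ * vector types and the f16/f32 CDE overloads that follow. */
+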
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f16)))
+float16x8_t __arm_vcx1q_m(int, float16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f32)))
+float32x4_t __arm_vcx1q_m(int, float32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f16)))
+float16x8_t __arm_vcx1qa(int, float16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f32)))
+float32x4_t __arm_vcx1qa(int, float32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f16)))
+float16x8_t __arm_vcx1qa_m(int, float16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f32)))
+float32x4_t __arm_vcx1qa_m(int, float32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f16)))
+float16x8_t __arm_vcx2q(int, float16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f32)))
+float32x4_t __arm_vcx2q(int, float32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f16)))
+float16x8_t __arm_vcx2q_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f32)))
+float32x4_t __arm_vcx2q_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f16)))
+uint8x16_t __arm_vcx2q_u8(int, float16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f32)))
+uint8x16_t __arm_vcx2q_u8(int, float32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f16)))
+float16x8_t __arm_vcx2qa_impl(int, float16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f32)))
+float32x4_t __arm_vcx2qa_impl(int, float32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f16)))
+float16x8_t __arm_vcx2qa_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f32)))
+float32x4_t __arm_vcx2qa_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f16)))
+float16x8_t __arm_vcx3q_impl(int, float16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f32)))
+float32x4_t __arm_vcx3q_impl(int, float32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f16)))
+float16x8_t __arm_vcx3q_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f32)))
+float32x4_t __arm_vcx3q_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f16)))
+uint8x16_t __arm_vcx3q_u8_impl(int, float16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f32)))
+uint8x16_t __arm_vcx3q_u8_impl(int, float32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f16)))
+float16x8_t __arm_vcx3qa_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f32)))
+float32x4_t __arm_vcx3qa_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f16)))
+float16x8_t __arm_vcx3qa_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f32)))
+float32x4_t __arm_vcx3qa_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
+
+#endif /* __ARM_FEATURE_MVE & 2 */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __ARM_CDE_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_cmse.h b/darwin-x86/lib64/clang/11.0.5/include/arm_cmse.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/arm_cmse.h
rename to darwin-x86/lib64/clang/11.0.5/include/arm_cmse.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_fp16.h b/darwin-x86/lib64/clang/11.0.5/include/arm_fp16.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/arm_fp16.h
rename to darwin-x86/lib64/clang/11.0.5/include/arm_fp16.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_mve.h b/darwin-x86/lib64/clang/11.0.5/include/arm_mve.h
new file mode 100644
index 0000000..4da41dc
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/arm_mve.h
@@ -0,0 +1,19187 @@
+/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_MVE_H
+#define __ARM_MVE_H
+
+#if !__ARM_FEATURE_MVE
+#error "MVE support not enabled"
+#endif
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t mve_pred16_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
+typedef struct { int16x8_t val[2]; } int16x8x2_t;
+typedef struct { int16x8_t val[4]; } int16x8x4_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
+typedef struct { int32x4_t val[2]; } int32x4x2_t;
+typedef struct { int32x4_t val[4]; } int32x4x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
+typedef struct { int64x2_t val[2]; } int64x2x2_t;
+typedef struct { int64x2_t val[4]; } int64x2x4_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
+typedef struct { int8x16_t val[2]; } int8x16x2_t;
+typedef struct { int8x16_t val[4]; } int8x16x4_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
+typedef struct { uint16x8_t val[2]; } uint16x8x2_t;
+typedef struct { uint16x8_t val[4]; } uint16x8x4_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
+typedef struct { uint32x4_t val[2]; } uint32x4x2_t;
+typedef struct { uint32x4_t val[4]; } uint32x4x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
+typedef struct { uint64x2_t val[2]; } uint64x2x2_t;
+typedef struct { uint64x2_t val[4]; } uint64x2x4_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;
+typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
+typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
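+/* Orientation for the typedefs above: every MVE vector is 128 bits wide, so
+ * the lane count follows from the element size (16 x int8_t, 8 x int16_t,
+ * 4 x int32_t, 2 x int64_t).  mve_pred16_t carries one predicate bit per
+ * byte lane of that 128-bit vector, and the xN structs hold the multi-vector
+ * results of the de-interleaving vld2q/vld4q loads declared further down. */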
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl)))
+int64_t __arm_asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll)))
+uint64_t __arm_lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr)))
+int32_t __arm_sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl)))
+int64_t __arm_sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t __arm_sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl)))
+int32_t __arm_sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll)))
+int64_t __arm_sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr)))
+int32_t __arm_srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl)))
+int64_t __arm_srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl)))
+uint32_t __arm_uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll)))
+uint64_t __arm_uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl)))
+uint32_t __arm_uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll)))
+uint64_t __arm_uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr)))
+uint32_t __arm_urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl)))
+uint64_t __arm_urshrl(uint64_t, int);
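+/* The declarations above are the scalar MVE shifts, which operate on
+ * ordinary 32- and 64-bit register values rather than vectors; in the names,
+ * a trailing `l' marks a 64-bit operand, `sq'/`uq' saturating
+ * signed/unsigned, and `r' rounding.  A hedged sketch with illustrative
+ * operands:
+ *
+ *   int64_t acc = -1000;
+ *   acc = __arm_asrl(acc, 4);            // arithmetic shift right by 4
+ *   uint32_t u = __arm_urshr(4096u, 5);  // rounding shift right by 5
+ */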
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t __arm_vabdq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t __arm_vabdq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t __arm_vabdq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t __arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t __arm_vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t __arm_vabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t __arm_vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t __arm_vabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t __arm_vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t __arm_vabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t __arm_vabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t __arm_vabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t __arm_vabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t __arm_vabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t __arm_vabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t __arm_vabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t __arm_vabsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t __arm_vabsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t __arm_vabsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t __arm_vabsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t __arm_vabsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t __arm_vabsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t __arm_vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t __arm_vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t __arm_vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t __arm_vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t __arm_vaddlvaq_s32(int64_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t __arm_vaddlvaq(int64_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t __arm_vaddlvaq_u32(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t __arm_vaddlvaq(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t __arm_vaddlvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t __arm_vaddlvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t __arm_vaddlvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t __arm_vaddlvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t __arm_vaddlvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t __arm_vaddlvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t __arm_vaddlvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t __arm_vaddlvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t __arm_vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t __arm_vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t __arm_vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t __arm_vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t __arm_vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t __arm_vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t __arm_vaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t __arm_vaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t __arm_vaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t __arm_vaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t __arm_vaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t __arm_vaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t __arm_vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t __arm_vaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t __arm_vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t __arm_vaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t __arm_vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t __arm_vaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t __arm_vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t __arm_vaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t __arm_vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t __arm_vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t __arm_vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t __arm_vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
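+/* The vaddq family above shows the naming scheme the whole header follows:
+ * the _s16/_u32/... forms are the explicit overloads and the suffix-free
+ * name is polymorphic; _n takes a scalar operand broadcast to every lane;
+ * _m is merging predication (lanes whose predicate bit is clear are taken
+ * from the leading `inactive' argument); _x is "don't care" predication
+ * (unselected lanes end up undefined).  A sketch with illustrative values:
+ *
+ *   int32x4_t a, b;
+ *   mve_pred16_t p = 0x00ff;                  // select the low two lanes
+ *   int32x4_t r = __arm_vaddq_m(a, a, b, p);  // high lanes keep `a'
+ */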
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t __arm_vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t __arm_vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t __arm_vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t __arm_vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t __arm_vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t __arm_vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t __arm_vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t __arm_vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t __arm_vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t __arm_vaddvaq_s16(int32_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t __arm_vaddvaq(int32_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t __arm_vaddvaq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t __arm_vaddvaq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t __arm_vaddvaq_s8(int32_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t __arm_vaddvaq(int32_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t __arm_vaddvaq_u16(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t __arm_vaddvaq(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t __arm_vaddvaq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t __arm_vaddvaq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t __arm_vaddvaq_u8(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t __arm_vaddvaq(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t __arm_vaddvq_p_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t __arm_vaddvq_p(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t __arm_vaddvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t __arm_vaddvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t __arm_vaddvq_p_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t __arm_vaddvq_p(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t __arm_vaddvq_p_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t __arm_vaddvq_p(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t __arm_vaddvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t __arm_vaddvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t __arm_vaddvq_p_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t __arm_vaddvq_p(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t __arm_vaddvq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t __arm_vaddvq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t __arm_vaddvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t __arm_vaddvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t __arm_vaddvq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t __arm_vaddvq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t __arm_vaddvq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t __arm_vaddvq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t __arm_vaddvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t __arm_vaddvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t __arm_vaddvq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t __arm_vaddvq(uint8x16_t);
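+/* vaddvq/vaddvaq are across-vector reductions: vaddvq sums every lane into
+ * a 32-bit scalar, vaddvaq folds an accumulator in as well, and the vaddlv*
+ * forms earlier in the header widen the sum to 64 bits.  Illustrative
+ * sketch:
+ *
+ *   int16x8_t v;
+ *   int32_t sum = __arm_vaddvq(v);    // lane0 + lane1 + ... + lane7
+ *   sum = __arm_vaddvaq(sum, v);      // add the lanes to an accumulator
+ */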
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t __arm_vandq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t __arm_vandq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t __arm_vandq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t __arm_vandq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t __arm_vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
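+/* Illustrative note on the predication suffixes used throughout this
+ * header: the _m ("merging") forms take an extra leading `inactive`
+ * vector whose lanes are kept wherever the predicate bit is clear, while
+ * the _x ("don't care") forms leave inactive lanes unspecified, e.g.
+ *
+ *   int16x8_t r = __arm_vandq_m(inactive, a, b, p); // a&b, else inactive
+ *   int16x8_t t = __arm_vandq_x(a, b, p);           // inactive lanes undef
+ */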
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t __arm_vbicq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t __arm_vbicq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t __arm_vbicq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t __arm_vbicq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t __arm_vbicq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t __arm_vbicq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t __arm_vbicq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t __arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
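+/* Illustrative note: vbicq is a bit-clear, computing a & ~b per lane; the
+ * _n forms declared with a scalar second operand correspond to VBIC's
+ * immediate encoding, which is why they exist only at the 16- and 32-bit
+ * element widths above and accept only encodable constants.
+ */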
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t __arm_vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t __arm_vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t __arm_vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t __arm_vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t __arm_vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t __arm_vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t __arm_vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t __arm_vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t __arm_vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t __arm_vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t __arm_vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t __arm_vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t __arm_vbrsrq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t __arm_vbrsrq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t __arm_vbrsrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t __arm_vbrsrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t __arm_vbrsrq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t __arm_vbrsrq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t __arm_vbrsrq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t __arm_vbrsrq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t __arm_vbrsrq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t __arm_vbrsrq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t __arm_vbrsrq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t __arm_vbrsrq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t __arm_vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t __arm_vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t __arm_vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t __arm_vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t __arm_vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t __arm_vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t __arm_vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t __arm_vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t __arm_vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t __arm_vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t __arm_vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t __arm_vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);
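+/* Illustrative note (semantics per the ACLE description of VBRSR, stated
+ * here only as an aid): vbrsrq bit-reverses the low-order bits of each
+ * lane, with the reversal width supplied by the int32_t scalar operand --
+ * the usual building block for bit-reversed (FFT-style) index vectors.
+ */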
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
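+/* Illustrative note: the vcaddq_rot90/_rot270 families treat adjacent
+ * lane pairs as (real, imaginary) complex values and add the second
+ * operand after rotating it by 90 or 270 degrees; rot90 yields
+ * (a.re - b.im, a.im + b.re) per pair, rot270 the conjugate rotation.
+ */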
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t __arm_vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t __arm_vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t __arm_vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t __arm_vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t __arm_vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t __arm_vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t __arm_vclsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t __arm_vclsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t __arm_vclsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t __arm_vclsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t __arm_vclsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t __arm_vclsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t __arm_vclsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t __arm_vclsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t __arm_vclsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t __arm_vclsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t __arm_vclsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t __arm_vclsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t __arm_vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t __arm_vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t __arm_vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t __arm_vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t __arm_vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t __arm_vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t __arm_vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t __arm_vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t __arm_vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t __arm_vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t __arm_vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t __arm_vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t __arm_vclzq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t __arm_vclzq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t __arm_vclzq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t __arm_vclzq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t __arm_vclzq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t __arm_vclzq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t __arm_vclzq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t __arm_vclzq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t __arm_vclzq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t __arm_vclzq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t __arm_vclzq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t __arm_vclzq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t __arm_vclzq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t __arm_vclzq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t __arm_vclzq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t __arm_vclzq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t __arm_vclzq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t __arm_vclzq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t __arm_vclzq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t __arm_vclzq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t __arm_vclzq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t __arm_vclzq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t __arm_vclzq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t __arm_vclzq_x(uint8x16_t, mve_pred16_t);
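+/* Illustrative note: vclsq counts leading sign bits (hence the signed-only
+ * declarations above) and vclzq counts leading zero bits, per lane.
+ */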
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
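+/* Illustrative sketch: the vcmp* families return an mve_pred16_t lane
+ * mask rather than a vector -- vcmpcsq is unsigned >= ("carry set",
+ * unsigned-only), vcmpeqq is ==, and the vcmpgeq/vcmpgtq declarations
+ * that follow are the signed >= and > forms. The mask feeds the _m/_x
+ * predicated operations declared earlier, e.g.
+ *
+ *   mve_pred16_t p = __arm_vcmpeqq(a, b);            // lanes where a == b
+ *   int32x4_t r   = __arm_vandq_m(fallback, a, c, p);
+ */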
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))
+mve_pred16_t __arm_vctp16q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))
+mve_pred16_t __arm_vctp16q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))
+mve_pred16_t __arm_vctp32q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))
+mve_pred16_t __arm_vctp32q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))
+mve_pred16_t __arm_vctp64q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))
+mve_pred16_t __arm_vctp64q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))
+mve_pred16_t __arm_vctp8q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))
+mve_pred16_t __arm_vctp8q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t __arm_vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t __arm_vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t __arm_vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t __arm_vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t __arm_vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t __arm_vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t __arm_vddupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t __arm_vddupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t __arm_vddupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t __arm_vddupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t __arm_vddupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t __arm_vddupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t __arm_vddupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t __arm_vddupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t __arm_vddupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t __arm_vddupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t __arm_vddupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t __arm_vddupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t __arm_vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t __arm_vddupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t __arm_vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t __arm_vddupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t __arm_vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t __arm_vddupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t __arm_vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t __arm_vddupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t __arm_vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t __arm_vddupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t __arm_vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t __arm_vddupq_x_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t __arm_vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t __arm_vdupq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t __arm_vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t __arm_vdupq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t __arm_vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t __arm_vdupq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t __arm_vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t __arm_vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t __arm_vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t __arm_vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t __arm_vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t __arm_vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))
+int16x8_t __arm_vdupq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))
+int32x4_t __arm_vdupq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))
+int8x16_t __arm_vdupq_n_s8(int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))
+uint16x8_t __arm_vdupq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))
+uint32x4_t __arm_vdupq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))
+uint8x16_t __arm_vdupq_n_u8(uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))
+int16x8_t __arm_vdupq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))
+int32x4_t __arm_vdupq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))
+int8x16_t __arm_vdupq_x_n_s8(int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))
+uint16x8_t __arm_vdupq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))
+uint32x4_t __arm_vdupq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))
+uint8x16_t __arm_vdupq_x_n_u8(uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t __arm_vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t __arm_vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t __arm_vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t __arm_vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t __arm_vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t __arm_vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t __arm_vdwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t __arm_vdwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t __arm_vdwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t __arm_vdwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t __arm_vdwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t __arm_vdwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t __arm_vdwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t __arm_vdwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t __arm_vdwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t __arm_vdwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t __arm_vdwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t __arm_vdwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t __arm_vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t __arm_vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t __arm_vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t __arm_vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t __arm_vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t __arm_vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t __arm_vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t __arm_vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t __arm_vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t __arm_vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t __arm_vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t __arm_vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t __arm_veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t __arm_veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t __arm_veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t __arm_veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t __arm_veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t __arm_veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t __arm_veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t __arm_veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t __arm_veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t __arm_veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t __arm_veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t __arm_veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t __arm_veorq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t __arm_veorq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t __arm_veorq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t __arm_veorq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t __arm_veorq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t __arm_veorq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t __arm_veorq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t __arm_veorq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t __arm_veorq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t __arm_veorq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t __arm_veorq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t __arm_veorq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t __arm_veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t __arm_veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t __arm_veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t __arm_veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t __arm_veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t __arm_veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t __arm_veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t __arm_veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t __arm_veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t __arm_veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t __arm_veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t __arm_veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t __arm_vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t __arm_vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t __arm_vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t __arm_vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t __arm_vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t __arm_vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t __arm_vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t __arm_vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t __arm_vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t __arm_vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t __arm_vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t __arm_vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t __arm_vhaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t __arm_vhaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t __arm_vhaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t __arm_vhaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t __arm_vhaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t __arm_vhaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t __arm_vhaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t __arm_vhaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t __arm_vhaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t __arm_vhaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t __arm_vhaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t __arm_vhaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t __arm_vhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t __arm_vhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t __arm_vhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t __arm_vhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t __arm_vhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t __arm_vhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t __arm_vhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t __arm_vhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t __arm_vhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t __arm_vhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t __arm_vhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t __arm_vhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t __arm_vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t __arm_vhaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t __arm_vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t __arm_vhaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t __arm_vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t __arm_vhaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t __arm_vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t __arm_vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t __arm_vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t __arm_vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t __arm_vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t __arm_vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t __arm_vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t __arm_vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t __arm_vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t __arm_vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t __arm_vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t __arm_vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
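Editorial aside, not part of the patch: the block above declares the vhaddq family, a halving add that computes (a + b) >> 1 per lane without intermediate overflow, in plain, merging (_m), and don't-care-predicated (_x) forms. A minimal C usage sketch, assuming <arm_mve.h> on an MVE-enabled target (e.g. -march=armv8.1-m.main+mve):

#include <arm_mve.h>

int16x8_t average_s16(int16x8_t a, int16x8_t b)
{
    /* (a + b) >> 1 per lane; no overflow in the intermediate sum */
    return __arm_vhaddq(a, b);
}

int16x8_t average_tail_s16(int16x8_t a, int16x8_t b, uint32_t n)
{
    mve_pred16_t p = __arm_vctp16q(n);  /* predicate: first n of 8 lanes */
    return __arm_vhaddq_x(a, b, p);     /* inactive lanes are undefined */
}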
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t __arm_vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t __arm_vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t __arm_vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t __arm_vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t __arm_vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t __arm_vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t __arm_vhcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t __arm_vhcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t __arm_vhcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t __arm_vhcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t __arm_vhcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t __arm_vhcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t __arm_vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t __arm_vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t __arm_vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t __arm_vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t __arm_vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t __arm_vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t __arm_vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t __arm_vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t __arm_vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t __arm_vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t __arm_vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t __arm_vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t __arm_vhcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t __arm_vhcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t __arm_vhcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t __arm_vhcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t __arm_vhcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t __arm_vhcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t __arm_vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t __arm_vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t __arm_vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t __arm_vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t __arm_vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t __arm_vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
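Editorial aside: vhcaddq is the halving complex add, where lanes are treated as (real, imaginary) pairs and the second operand is rotated by 90 or 270 degrees in the complex plane before the halved addition. A small sketch under the same assumptions as above:

#include <arm_mve.h>

/* Halving complex add: a + i*b per complex pair, result halved per lane */
int16x8_t cadd_rot90_halved(int16x8_t a, int16x8_t b)
{
    return __arm_vhcaddq_rot90(a, b);
}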
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t __arm_vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t __arm_vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t __arm_vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t __arm_vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t __arm_vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t __arm_vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t __arm_vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t __arm_vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t __arm_vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t __arm_vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t __arm_vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t __arm_vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t __arm_vhsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t __arm_vhsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t __arm_vhsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t __arm_vhsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t __arm_vhsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t __arm_vhsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t __arm_vhsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t __arm_vhsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t __arm_vhsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t __arm_vhsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t __arm_vhsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t __arm_vhsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t __arm_vhsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t __arm_vhsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t __arm_vhsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t __arm_vhsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t __arm_vhsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t __arm_vhsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t __arm_vhsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t __arm_vhsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t __arm_vhsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t __arm_vhsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t __arm_vhsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t __arm_vhsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t __arm_vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t __arm_vhsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t __arm_vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t __arm_vhsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t __arm_vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t __arm_vhsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t __arm_vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t __arm_vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t __arm_vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t __arm_vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t __arm_vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t __arm_vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t __arm_vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t __arm_vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t __arm_vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t __arm_vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t __arm_vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t __arm_vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
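Editorial aside: vhsubq mirrors vhaddq for subtraction, (a - b) >> 1 per lane. The _m variant below illustrates merging predication, where inactive lanes are taken from the first argument (a hypothetical `fallback` here):

#include <arm_mve.h>

uint8x16_t halved_diff_masked(uint8x16_t fallback, uint8x16_t a,
                              uint8x16_t b, mve_pred16_t p)
{
    /* (a - b) >> 1 where p is true; lanes copied from fallback elsewhere */
    return __arm_vhsubq_m(fallback, a, b, p);
}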
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t __arm_vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t __arm_vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t __arm_vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t __arm_vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t __arm_vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t __arm_vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t __arm_vidupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t __arm_vidupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t __arm_vidupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t __arm_vidupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t __arm_vidupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t __arm_vidupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t __arm_vidupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t __arm_vidupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t __arm_vidupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t __arm_vidupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t __arm_vidupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t __arm_vidupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t __arm_vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t __arm_vidupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t);
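Editorial aside: vidupq generates an incrementing sequence from a scalar start value; the step is an immediate constrained to 1, 2, 4, or 8, and the _wb overloads take a pointer and write the advanced counter back. A sketch of the writeback form (the pointer argument selects the _wb overload):

#include <arm_mve.h>

uint32x4_t next_indices(uint32_t *counter)
{
    /* Returns {c, c+1, c+2, c+3}; *counter is then advanced by 4 */
    return __arm_vidupq_u32(counter, 1);
}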
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t __arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t __arm_viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
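Editorial aside: viwdupq is the wrapping variant of vidupq, where the sequence wraps back to zero on reaching the runtime wrap limit, which is useful for circular-buffer indexing. A sketch (assumes `start` is below the wrap limit):

#include <arm_mve.h>

uint32x4_t ring_indices(uint32_t start)
{
    /* {start, start+1, ...}, wrapping to 0 when the value reaches 16 */
    return __arm_viwdupq_u32(start, 16, 1);
}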
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);
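Editorial aside: vld1q is a contiguous vector load, and the _z forms are zeroing-predicated, so lanes whose predicate bit is clear read as zero rather than touching memory. That makes loop tails safe without scalar cleanup, e.g.:

#include <arm_mve.h>

int32x4_t load_tail(const int32_t *src, uint32_t n)
{
    mve_pred16_t p = __arm_vctp32q(n);  /* first n of 4 lanes active */
    return __arm_vld1q_z(src, p);       /* inactive lanes load as zero */
}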
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q(const uint8_t *);
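Editorial aside: vld2q and vld4q are de-interleaving loads returning the two- and four-vector structs declared above, e.g. splitting interleaved stereo samples:

#include <arm_mve.h>

void split_stereo(const int16_t *interleaved,
                  int16x8_t *left, int16x8_t *right)
{
    int16x8x2_t lr = __arm_vld2q(interleaved);  /* even lanes -> val[0], odd -> val[1] */
    *left  = lr.val[0];
    *right = lr.val[1];
}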
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t __arm_vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t __arm_vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t __arm_vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t __arm_vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t __arm_vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t __arm_vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);
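Editorial aside: the vldrbq gather forms load individual bytes at per-lane offsets from a scalar base and widen them to the destination lane width (sign- or zero-extending per the suffix). A sketch of a widening table lookup:

#include <arm_mve.h>

int16x8_t gather_bytes(const int8_t *table, uint16x8_t idx)
{
    /* Loads table[idx[i]] for each lane, sign-extending to 16 bits */
    return __arm_vldrbq_gather_offset(table, idx);
}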
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
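Editorial aside: the vldrdq gathers load 64-bit lanes either through a vector of absolute addresses (_gather_base) or through per-lane offsets from a scalar base; the shifted-offset forms scale the offsets by the element size (left shift by 3 for doublewords), so the offsets act as array indices:

#include <arm_mve.h>

uint64x2_t gather_pair(const uint64_t *base, uint64x2_t idx)
{
    /* Loads base[idx[i]] per lane; idx is scaled by 8 bytes internally */
    return __arm_vldrdq_gather_shifted_offset(base, idx);
}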
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t __arm_vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t __arm_vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t __arm_vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t __arm_vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t __arm_vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t __arm_vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);
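+/*
+ * Illustrative sketch of the load family above (hypothetical helpers, not
+ * part of the generated header; assumes an MVE-enabled target and in-bounds
+ * pointers). The gather_offset forms treat the offset vector as raw byte
+ * offsets, while the gather_shifted_offset forms scale each offset by the
+ * element size, so the offsets act as element indices:
+ *
+ *   int32x4_t gather(const int32_t *base, uint32x4_t idx) {
+ *     return __arm_vldrwq_gather_shifted_offset_s32(base, idx); // base[idx[i]]
+ *   }
+ *   int32x4_t tail(const int32_t *p, mve_pred16_t pr) {
+ *     return __arm_vldrwq_z_s32(p, pr); // _z: inactive lanes read as zero
+ *   }
+ */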
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t __arm_vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t __arm_vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t __arm_vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t __arm_vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t __arm_vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t __arm_vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t __arm_vmaxavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t __arm_vmaxavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t __arm_vmaxavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t __arm_vmaxavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t __arm_vmaxavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t __arm_vmaxavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t __arm_vmaxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t __arm_vmaxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t __arm_vmaxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t __arm_vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t __arm_vmaxvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t __arm_vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t __arm_vmaxvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t __arm_vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t __arm_vmaxvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t __arm_vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t __arm_vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t __arm_vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t __arm_vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t __arm_vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t __arm_vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);
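+/*
+ * Illustrative sketch: __arm_vmaxvq folds a vector into a scalar, returning
+ * the maximum of its scalar first argument and every lane. A hypothetical
+ * helper (assumes an MVE-enabled target and n a multiple of 4):
+ *
+ *   int32_t max_all(const int32_t *p, int n) {
+ *     int32_t m = INT32_MIN;                       // from <stdint.h>
+ *     for (int i = 0; i + 4 <= n; i += 4)
+ *       m = __arm_vmaxvq_s32(m, __arm_vldrwq_s32(p + i));
+ *     return m;
+ *   }
+ */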
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t __arm_vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t __arm_vminavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t __arm_vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t __arm_vminavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t __arm_vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t __arm_vminavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t __arm_vminavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t __arm_vminavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t __arm_vminavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t __arm_vminavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t __arm_vminavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t __arm_vminavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t __arm_vminq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t __arm_vminq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t __arm_vminq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t __arm_vminq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t __arm_vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t __arm_vminvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t __arm_vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t __arm_vminvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t __arm_vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t __arm_vminvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t __arm_vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t __arm_vminvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t __arm_vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t __arm_vminvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t __arm_vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t __arm_vminvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq(uint8_t, uint8x16_t);
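+/*
+ * Illustrative note on the predication suffixes used throughout: _m merges
+ * (inactive lanes come from the leading "inactive" argument), _z zeroes
+ * inactive lanes, _x leaves them unspecified, and _p predicates a scalar
+ * reduction. For example, with predicate pr only the active lanes of v can
+ * update the running minimum m:
+ *
+ *   int32_t m2 = __arm_vminvq_p_s32(m, v, pr);
+ */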
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t __arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t __arm_vmladavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t __arm_vmladavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t __arm_vmladavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t __arm_vmladavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t __arm_vmladavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t __arm_vmladavxq(int8x16_t, int8x16_t);
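+/*
+ * Illustrative sketch: the vmladav family is a multiply-accumulate-across
+ * reduction, i.e. a dot product with a 32-bit accumulator. A hypothetical
+ * helper over a multiple-of-8 length (assumes an MVE-enabled target):
+ *
+ *   int32_t dot16(const int16_t *a, const int16_t *b, int n) {
+ *     int32_t acc = 0;
+ *     for (int i = 0; i + 8 <= n; i += 8)
+ *       acc = __arm_vmladavaq_s16(acc, __arm_vldrhq_s16(a + i),
+ *                                 __arm_vldrhq_s16(b + i));
+ *     return acc;
+ *   }
+ */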
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t __arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t __arm_vmlaldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t __arm_vmlaldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t __arm_vmlaldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t __arm_vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t __arm_vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t __arm_vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t __arm_vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t __arm_vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t __arm_vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t __arm_vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t __arm_vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t __arm_vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t __arm_vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t __arm_vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t __arm_vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t __arm_vmlaq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t __arm_vmlaq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t __arm_vmlaq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t __arm_vmlaq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t __arm_vmlaq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t __arm_vmlaq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t __arm_vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t __arm_vmlaq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t __arm_vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t __arm_vmlaq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t __arm_vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t __arm_vmlaq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t __arm_vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t __arm_vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t __arm_vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t __arm_vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t __arm_vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t __arm_vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t __arm_vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t __arm_vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t __arm_vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t __arm_vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t __arm_vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t __arm_vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t __arm_vmlasq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t __arm_vmlasq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t __arm_vmlasq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t __arm_vmlasq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t __arm_vmlasq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t __arm_vmlasq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t __arm_vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t __arm_vmlasq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t __arm_vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t __arm_vmlasq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t __arm_vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t __arm_vmlasq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t __arm_vmlsdavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t __arm_vmlsdavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t __arm_vmlsdavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t __arm_vmlsdavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t __arm_vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t __arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t __arm_vmlsldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t __arm_vmlsldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t __arm_vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t __arm_vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t __arm_vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t __arm_vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t __arm_vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t __arm_vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t __arm_vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t __arm_vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t __arm_vmovlbq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t __arm_vmovlbq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t __arm_vmovlbq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t __arm_vmovlbq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t __arm_vmovlbq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t __arm_vmovlbq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t __arm_vmovlbq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t __arm_vmovlbq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t __arm_vmovlbq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t __arm_vmovlbq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t __arm_vmovlbq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t __arm_vmovlbq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t __arm_vmovlbq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t __arm_vmovlbq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t __arm_vmovlbq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t __arm_vmovlbq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t __arm_vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t __arm_vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t __arm_vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t __arm_vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t __arm_vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t __arm_vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t __arm_vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t __arm_vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t __arm_vmovltq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t __arm_vmovltq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t __arm_vmovltq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t __arm_vmovltq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t __arm_vmovltq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t __arm_vmovltq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t __arm_vmovltq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t __arm_vmovltq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t __arm_vmovltq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t __arm_vmovltq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t __arm_vmovltq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t __arm_vmovltq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t __arm_vmovltq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t __arm_vmovltq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t __arm_vmovltq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t __arm_vmovltq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t __arm_vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t __arm_vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t __arm_vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t __arm_vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t __arm_vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t __arm_vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t __arm_vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t __arm_vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t __arm_vmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t __arm_vmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t __arm_vmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t __arm_vmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t __arm_vmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t __arm_vmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t __arm_vmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t __arm_vmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t __arm_vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t __arm_vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t __arm_vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t __arm_vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t __arm_vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t __arm_vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t __arm_vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t __arm_vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t __arm_vmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t __arm_vmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t __arm_vmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t __arm_vmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t __arm_vmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t __arm_vmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t __arm_vmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t __arm_vmovntq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t __arm_vmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t __arm_vmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t __arm_vmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t __arm_vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t __arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t __arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t __arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t __arm_vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t __arm_vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t __arm_vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t __arm_vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t __arm_vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t __arm_vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t __arm_vmulq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t __arm_vmulq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t __arm_vmulq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t __arm_vmulq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t __arm_vmulq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t __arm_vmulq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t __arm_vmulq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t __arm_vmulq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t __arm_vmulq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t __arm_vmulq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t __arm_vmulq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t __arm_vmulq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t __arm_vmulq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t __arm_vmulq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t __arm_vmulq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t __arm_vmulq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t __arm_vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t __arm_vmulq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t __arm_vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t __arm_vmulq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t __arm_vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t __arm_vmulq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t __arm_vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t __arm_vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t __arm_vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t __arm_vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t __arm_vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t __arm_vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t __arm_vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t __arm_vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t __arm_vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t __arm_vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t __arm_vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t __arm_vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t __arm_vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t __arm_vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t __arm_vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t __arm_vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))
+int16x8_t __arm_vmvnq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))
+int32x4_t __arm_vmvnq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))
+uint16x8_t __arm_vmvnq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))
+uint32x4_t __arm_vmvnq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t __arm_vmvnq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t __arm_vmvnq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t __arm_vmvnq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t __arm_vmvnq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t __arm_vmvnq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t __arm_vmvnq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t __arm_vmvnq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t __arm_vmvnq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t __arm_vmvnq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t __arm_vmvnq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t __arm_vmvnq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t __arm_vmvnq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
+int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
+int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
+uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
+uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t __arm_vmvnq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t __arm_vmvnq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t __arm_vmvnq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t __arm_vmvnq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t __arm_vmvnq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t __arm_vmvnq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t __arm_vmvnq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t __arm_vmvnq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t __arm_vmvnq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t __arm_vmvnq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t __arm_vmvnq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t __arm_vmvnq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t __arm_vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t __arm_vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t __arm_vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t __arm_vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t __arm_vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t __arm_vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t __arm_vnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t __arm_vnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t __arm_vnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t __arm_vnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t __arm_vnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t __arm_vnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t __arm_vnegq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t __arm_vnegq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t __arm_vnegq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t __arm_vnegq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t __arm_vnegq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t __arm_vnegq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t __arm_vornq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t __arm_vornq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t __arm_vornq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t __arm_vorrq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t __arm_vorrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t __arm_vorrq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t __arm_vorrq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t __arm_vorrq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t __arm_vorrq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t __arm_vorrq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t __arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t __arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))
+mve_pred16_t __arm_vpnot(mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t __arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t __arm_vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t __arm_vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t __arm_vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t __arm_vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t __arm_vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t __arm_vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t __arm_vqabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t __arm_vqabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t __arm_vqabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t __arm_vqabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t __arm_vqabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t __arm_vqabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t __arm_vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t __arm_vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t __arm_vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t __arm_vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t __arm_vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t __arm_vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t __arm_vqaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t __arm_vqaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t __arm_vqaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t __arm_vqaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t __arm_vqaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t __arm_vqaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t __arm_vqaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t __arm_vqaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t __arm_vqaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t __arm_vqaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t __arm_vqaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t __arm_vqaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t __arm_vqaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t __arm_vqaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t __arm_vqaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t __arm_vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t __arm_vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t __arm_vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t __arm_vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t __arm_vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t __arm_vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t __arm_vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t __arm_vqdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t __arm_vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t __arm_vqdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t __arm_vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t __arm_vqdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t __arm_vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t __arm_vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t __arm_vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t __arm_vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t __arm_vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t __arm_vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t __arm_vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t __arm_vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t __arm_vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t __arm_vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t __arm_vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t __arm_vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t __arm_vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t __arm_vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t __arm_vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t __arm_vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t __arm_vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t __arm_vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t __arm_vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t __arm_vqdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t __arm_vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t __arm_vqdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t __arm_vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t __arm_vqdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t __arm_vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t __arm_vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t __arm_vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t __arm_vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t __arm_vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t __arm_vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t __arm_vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t __arm_vqdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t __arm_vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t __arm_vqdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t __arm_vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t __arm_vqdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t __arm_vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t __arm_vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t __arm_vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t __arm_vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t __arm_vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t __arm_vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t __arm_vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t __arm_vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t __arm_vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t __arm_vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t __arm_vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t __arm_vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t __arm_vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t __arm_vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t __arm_vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t __arm_vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t __arm_vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t __arm_vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t __arm_vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t __arm_vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t __arm_vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t __arm_vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t __arm_vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t __arm_vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t __arm_vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t __arm_vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t __arm_vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t __arm_vqdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t __arm_vqdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t __arm_vqdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t __arm_vqdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t __arm_vqdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t __arm_vqdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t __arm_vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t __arm_vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t __arm_vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t __arm_vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t __arm_vqdmullbq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t __arm_vqdmullbq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t __arm_vqdmullbq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t __arm_vqdmullbq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t __arm_vqdmullbq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t __arm_vqdmullbq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t __arm_vqdmullbq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t __arm_vqdmullbq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t __arm_vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t __arm_vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t __arm_vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t __arm_vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t __arm_vqdmulltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t __arm_vqdmulltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t __arm_vqdmulltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t __arm_vqdmulltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t __arm_vqdmulltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t __arm_vqdmulltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t __arm_vqdmulltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t __arm_vqdmulltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t __arm_vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t __arm_vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t __arm_vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t __arm_vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t __arm_vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t __arm_vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t __arm_vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t __arm_vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t __arm_vqmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t __arm_vqmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t __arm_vqmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t __arm_vqmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t __arm_vqmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t __arm_vqmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t __arm_vqmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t __arm_vqmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t __arm_vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t __arm_vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t __arm_vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t __arm_vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t __arm_vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t __arm_vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t __arm_vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t __arm_vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t __arm_vqmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t __arm_vqmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t __arm_vqmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t __arm_vqmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t __arm_vqmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t __arm_vqmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t __arm_vqmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t __arm_vqmovntq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t __arm_vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t __arm_vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t __arm_vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t __arm_vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t __arm_vqmovunbq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t __arm_vqmovunbq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t __arm_vqmovunbq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t __arm_vqmovunbq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t __arm_vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t __arm_vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t __arm_vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t __arm_vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t __arm_vqmovuntq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t __arm_vqmovuntq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t __arm_vqmovuntq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t __arm_vqmovuntq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t __arm_vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t __arm_vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t __arm_vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t __arm_vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t __arm_vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t __arm_vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t __arm_vqnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t __arm_vqnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t __arm_vqnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t __arm_vqnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t __arm_vqnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t __arm_vqnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t __arm_vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t __arm_vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t __arm_vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t __arm_vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t __arm_vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t __arm_vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t __arm_vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t __arm_vqrdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t __arm_vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t __arm_vqrdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t __arm_vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t __arm_vqrdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t __arm_vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t __arm_vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t __arm_vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t __arm_vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t __arm_vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t __arm_vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t __arm_vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t __arm_vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t __arm_vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t __arm_vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t __arm_vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t __arm_vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t __arm_vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t __arm_vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t __arm_vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t __arm_vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t __arm_vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t __arm_vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t __arm_vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t __arm_vqrdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t __arm_vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t __arm_vqrdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t __arm_vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t __arm_vqrdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t __arm_vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t __arm_vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t __arm_vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t __arm_vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t __arm_vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t __arm_vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t __arm_vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t __arm_vqrdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t __arm_vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t __arm_vqrdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t __arm_vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t __arm_vqrdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t __arm_vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t __arm_vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t __arm_vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t __arm_vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t __arm_vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t __arm_vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t __arm_vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t __arm_vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t __arm_vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t __arm_vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t __arm_vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t __arm_vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t __arm_vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t __arm_vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t __arm_vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t __arm_vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t __arm_vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t __arm_vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t __arm_vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t __arm_vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t __arm_vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t __arm_vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t __arm_vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t __arm_vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t __arm_vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t __arm_vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t __arm_vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t __arm_vqrdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t __arm_vqrdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t __arm_vqrdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t __arm_vqrdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t __arm_vqrdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t __arm_vqrdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t __arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t __arm_vqrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t __arm_vqrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t __arm_vqrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t __arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t __arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t __arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int);
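+/* Editor's sketch (not part of the generated header): the vqrshrun[bt]q
+ * intrinsics above round, shift right, and saturate signed lanes into
+ * unsigned half-width lanes. A common use is converting Q8 fixed-point
+ * samples to 8-bit pixels; `pix`, `lo`, and `hi` are hypothetical names.
+ *
+ *   uint8x16_t q8_to_pixels(uint8x16_t pix, int16x8_t lo, int16x8_t hi) {
+ *       pix = __arm_vqrshrunbq(pix, lo, 8);  // even (bottom) byte lanes
+ *       pix = __arm_vqrshruntq(pix, hi, 8);  // odd (top) byte lanes
+ *       return pix;
+ *   }
+ */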
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t __arm_vqshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t __arm_vqshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t __arm_vqshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t __arm_vqshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t __arm_vqshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t __arm_vqshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t __arm_vqshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t __arm_vqshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t __arm_vqshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t __arm_vqshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t __arm_vqshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t __arm_vqshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t __arm_vqshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t __arm_vqshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t __arm_vqshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t __arm_vqshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t);
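+/* Editor's sketch (not part of the generated header): the vqshlq family
+ * above comes in four shapes: _n (immediate shift), _r (scalar shift in a
+ * register), the plain vector-by-vector form, and the predicated _m
+ * variants. A minimal example of the vector form, with hypothetical names
+ * `in` and `shifts`:
+ *
+ *   int16x8_t sat_shift(int16x8_t in, int16x8_t shifts) {
+ *       // Each lane of `in` is shifted by the corresponding signed lane
+ *       // of `shifts`; results saturate to the int16_t range rather than
+ *       // wrapping. The unsuffixed name resolves by overloading to
+ *       // __arm_vqshlq_s16.
+ *       return __arm_vqshlq(in, shifts);
+ *   }
+ */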
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t __arm_vqshluq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t __arm_vqshluq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t __arm_vqshluq(int8x16_t, int);
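+/* Editor's sketch (not part of the generated header): vqshluq shifts
+ * *signed* input left and saturates into the *unsigned* range, so negative
+ * lanes clamp to zero. Hypothetical name `s`:
+ *
+ *   uint16x8_t to_unsigned_q3(int16x8_t s) {
+ *       return __arm_vqshluq(s, 3);  // lanes < 0 become 0; overly large
+ *   }                                // lanes clamp to UINT16_MAX
+ */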
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int);
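+/* Editor's sketch (not part of the generated header): like their rounding
+ * counterparts, the vqshrn[bt]q pairs narrow with saturation, writing the
+ * even (bottom) or odd (top) lanes of the destination. Packing two
+ * int32x4_t vectors into one int16x8_t, with hypothetical names:
+ *
+ *   int16x8_t pack32to16(int16x8_t acc, int32x4_t a, int32x4_t b) {
+ *       acc = __arm_vqshrnbq(acc, a, 4);  // even int16 lanes from a
+ *       acc = __arm_vqshrntq(acc, b, 4);  // odd int16 lanes from b
+ *       return acc;                       // note: the lanes interleave
+ *   }
+ */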
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t __arm_vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t __arm_vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t __arm_vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t __arm_vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t __arm_vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t __arm_vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
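+/* Editor's sketch (not part of the generated header): every _m variant
+ * takes an `inactive` vector first and an mve_pred16_t last; lanes whose
+ * predicate bits are clear are copied from `inactive`. Assuming
+ * __arm_vctp16q (the tail-predication intrinsic, declared elsewhere in
+ * this header) for a loop tail of `n` elements:
+ *
+ *   int16x8_t tail_sat_sub(int16x8_t a, int16x8_t b, uint32_t n) {
+ *       mve_pred16_t p = __arm_vctp16q(n);  // first n lanes active
+ *       return __arm_vqsubq_m(a, a, b, p);  // inactive lanes keep a
+ *   }
+ */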
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t __arm_vqsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t __arm_vqsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t __arm_vqsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t __arm_vqsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t __arm_vqsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t __arm_vqsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t __arm_vqsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t __arm_vqsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t __arm_vqsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t __arm_vqsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t __arm_vqsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t __arm_vqsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t __arm_vqsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t __arm_vqsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t __arm_vqsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t __arm_vqsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t);
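+/* Editor's sketch (not part of the generated header): unpredicated
+ * saturating subtract. Unsigned lanes clamp at zero instead of wrapping,
+ * which gives a branch-free "difference or zero":
+ *
+ *   uint8x16_t diff_or_zero(uint8x16_t a, uint8x16_t b) {
+ *       return __arm_vqsubq(a, b);  // lanes where b > a yield 0
+ *   }
+ */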
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
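+/* Editor's sketch (not part of the generated header): vreinterpretq only
+ * relabels the 128-bit register with a new lane type; no instruction is
+ * emitted and no lanes move. Hypothetical name `bytes`:
+ *
+ *   uint32x4_t view_as_words(int8x16_t bytes) {
+ *       return __arm_vreinterpretq_u32(bytes);  // same bits, 4 x u32 view
+ *   }
+ */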
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t __arm_vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t __arm_vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t __arm_vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t __arm_vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t __arm_vrev16q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t __arm_vrev16q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t __arm_vrev16q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t __arm_vrev16q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t __arm_vrev16q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t __arm_vrev16q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t __arm_vrev16q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t __arm_vrev16q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t __arm_vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t __arm_vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t __arm_vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t __arm_vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t __arm_vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t __arm_vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t __arm_vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t __arm_vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t __arm_vrev32q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t __arm_vrev32q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t __arm_vrev32q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t __arm_vrev32q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t __arm_vrev32q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t __arm_vrev32q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t __arm_vrev32q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t __arm_vrev32q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t __arm_vrev32q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t __arm_vrev32q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t __arm_vrev32q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t __arm_vrev32q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t __arm_vrev32q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t __arm_vrev32q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t __arm_vrev32q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t __arm_vrev32q_x(uint8x16_t, mve_pred16_t);
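+/* Editor's sketch (not part of the generated header): vrevNq reverses the
+ * order of elements within each N-bit container, so vrev16q on byte lanes
+ * is an element-wise 16-bit byte swap (endianness conversion). The _x
+ * variants seen above are predicated forms whose inactive lanes are left
+ * undefined:
+ *
+ *   uint8x16_t bswap16_lanes(uint8x16_t v) {
+ *       return __arm_vrev16q(v);  // swap each adjacent byte pair
+ *   }
+ */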
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t __arm_vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t __arm_vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t __arm_vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t __arm_vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t __arm_vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t __arm_vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t __arm_vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t __arm_vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t __arm_vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t __arm_vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t __arm_vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t __arm_vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t __arm_vrev64q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t __arm_vrev64q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t __arm_vrev64q_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t __arm_vrev64q(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t __arm_vrev64q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t __arm_vrev64q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t __arm_vrev64q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t __arm_vrev64q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t __arm_vrev64q_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t __arm_vrev64q(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t __arm_vrev64q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t __arm_vrev64q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t __arm_vrev64q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t __arm_vrev64q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t __arm_vrev64q_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t __arm_vrev64q_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t __arm_vrev64q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t __arm_vrev64q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t __arm_vrev64q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t __arm_vrev64q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t __arm_vrev64q_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t __arm_vrev64q_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t __arm_vrev64q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t __arm_vrev64q_x(uint8x16_t, mve_pred16_t);
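+/* Illustrative sketch: vrev64q_* mirrors vrev32q_* but reverses element
+ * order within each 64-bit container, so it also covers 32-bit elements:
+ *
+ *   uint32x4_t r = vrev64q(v);       swap the two 32-bit halves of each
+ *                                    64-bit chunk
+ */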
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
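+/* Illustrative sketch: vrhaddq is a per-lane rounding halving add,
+ * (a + b + 1) >> 1, computed without intermediate overflow:
+ *
+ *   uint8x16_t avg = vrhaddq(a, b);           e.g. averaging 8-bit pixels
+ *   avg = vrhaddq_x(a, b, p);                 predicated, _x flavour
+ */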
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t __arm_vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t);
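+/* Illustrative sketch for the vrml{a,s}ldavh* reductions above: 64-bit
+ * products of corresponding 32-bit lanes are summed across the vector and
+ * the upper 64 bits of the sum are returned, with the low 8 bits rounded
+ * off. The `a` forms fold in an existing accumulator, the `x` forms
+ * exchange the lanes of the second operand, and vrmls* subtracts the
+ * second product of each pair:
+ *
+ *   int64_t acc = vrmlaldavhq(a, b);          rounded high-part dot product
+ *   acc = vrmlaldavhaq(acc, c, d);            accumulate a second pair
+ */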
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
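+/* Illustrative sketch: vrmulhq multiplies corresponding lanes and keeps
+ * the rounded most-significant half of each double-width product; for
+ * int16 lanes this is roughly (int16_t)((a * b + (1 << 15)) >> 16):
+ *
+ *   int16x8_t hi = vrmulhq(a, b);
+ *   hi = vrmulhq_x(a, b, p);                  predicated, _x flavour
+ */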
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t __arm_vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t __arm_vrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t __arm_vrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t __arm_vrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t __arm_vrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t __arm_vrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t __arm_vrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t __arm_vrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t __arm_vrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t __arm_vrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t __arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
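+/* Illustrative sketch: vrshlq shifts each lane left by a signed, per-lane
+ * amount taken from a second vector; negative amounts act as rounding
+ * right shifts. The _n forms instead shift every lane by one int32_t:
+ *
+ *   int16x8_t r = vrshlq(v, shifts);          per-lane signed shifts
+ *   int32_t sh = -3;
+ *   r = vrshlq(v, sh);                        every lane: rounding >> 3
+ */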
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int);
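+/* Illustrative sketch: vrshrnbq/vrshrntq round, shift right and narrow,
+ * writing results into the bottom (b) or top (t) lane of each destination
+ * element pair while preserving the other lane, so the two together
+ * interleave two wide vectors into one narrow vector:
+ *
+ *   uint8x16_t d = vrshrnbq(d, lo, 4);        even byte lanes from lo
+ *   d = vrshrntq(d, hi, 4);                   odd byte lanes from hi
+ */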
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t __arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t __arm_vrshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t __arm_vrshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t __arm_vrshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t __arm_vrshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t __arm_vrshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t __arm_vrshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t __arm_vrshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t __arm_vrshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t __arm_vrshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t __arm_vrshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t);
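+/* Illustrative sketch: vrshrq is a per-lane rounding shift right by an
+ * immediate n in 1..element width, i.e. (x + (1 << (n - 1))) >> n:
+ *
+ *   int32x4_t r = vrshrq(v, 8);
+ *   r = vrshrq_x(v, 8, p);                    predicated, _x flavour
+ */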
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t __arm_vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t __arm_vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t __arm_vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t __arm_vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t __arm_vsbciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t __arm_vsbciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t __arm_vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t __arm_vsbciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t __arm_vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t __arm_vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t __arm_vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t __arm_vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t __arm_vsbcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t __arm_vsbcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t __arm_vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t __arm_vsbcq(uint32x4_t, uint32x4_t, unsigned *);
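+/* Illustrative sketch: vsbcq/vsbciq chain a borrow across the four 32-bit
+ * lanes, yielding a 128-bit subtract per call. vsbciq starts a chain (the
+ * carry-in is fixed to 1, i.e. no initial borrow) and writes the carry out
+ * through the pointer; vsbcq both reads and updates *carry:
+ *
+ *   unsigned carry;
+ *   uint32x4_t lo = vsbciq(a_lo, b_lo, &carry);
+ *   uint32x4_t hi = vsbcq(a_hi, b_hi, &carry);    borrow propagates on
+ */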
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);
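+/* Illustrative sketch: vsetq_lane inserts a scalar into one lane; the lane
+ * index must be a compile-time constant valid for the element count:
+ *
+ *   int32x4_t r = vsetq_lane(42, v, 3);       set lane 3 of four
+ */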
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t __arm_vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t __arm_vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t __arm_vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t __arm_vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t __arm_vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t __arm_vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t __arm_vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t __arm_vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t __arm_vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t __arm_vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t __arm_vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t __arm_vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t __arm_vshlcq_s16(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t __arm_vshlcq(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t __arm_vshlcq_s32(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t __arm_vshlcq(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t __arm_vshlcq_s8(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t __arm_vshlcq(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t __arm_vshlcq_u16(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t __arm_vshlcq(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t __arm_vshlcq_u32(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t __arm_vshlcq(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t __arm_vshlcq_u8(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t __arm_vshlcq(uint8x16_t, uint32_t *, int);
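+/* Illustrative sketch: vshlcq treats the 128-bit vector as one wide value
+ * shifted left by n (1..32); *carry supplies the n bits entering at the
+ * bottom and receives the n bits shifted out at the top, so chaining the
+ * pointer extends the shift across several vectors:
+ *
+ *   uint32_t carry = 0;
+ *   v_lo = vshlcq(v_lo, &carry, 4);
+ *   v_hi = vshlcq(v_hi, &carry, 4);           4 bits carried in from v_lo
+ */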
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t __arm_vshllbq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t __arm_vshllbq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t __arm_vshllbq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t __arm_vshllbq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t __arm_vshllbq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t __arm_vshllbq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t __arm_vshllbq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t __arm_vshlltq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t __arm_vshlltq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t __arm_vshlltq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t __arm_vshlltq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t __arm_vshlltq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t __arm_vshlltq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t);
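+/*
+ * Usage sketch (illustrative; assumes an MVE target): vshllbq/vshlltq widen
+ * while shifting. vshllbq takes the bottom (even-numbered) elements and
+ * vshlltq the top (odd-numbered) ones, doubles their width, and shifts left
+ * by the immediate, so one narrow vector yields two wide halves:
+ *
+ *   int16x8_t narrow = __arm_vdupq_n_s16(100);
+ *   int32x4_t even = __arm_vshllbq(narrow, 2);   // even lanes, widened << 2
+ *   int32x4_t odd  = __arm_vshlltq(narrow, 2);   // odd lanes, widened << 2
+ */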
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t __arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t __arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t __arm_vshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t __arm_vshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t __arm_vshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t __arm_vshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t __arm_vshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t __arm_vshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t __arm_vshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t __arm_vshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t __arm_vshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t __arm_vshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t __arm_vshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t __arm_vshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t __arm_vshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t __arm_vshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t __arm_vshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t __arm_vshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t __arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t __arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
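+/*
+ * Usage sketch (illustrative; assumes an MVE target): vshlq shifts each
+ * element left by a per-element signed count, where a negative count shifts
+ * right. The suffixes follow the usual MVE pattern: _n takes an immediate,
+ * _r a scalar count applied to all lanes, _m is the merging predicated form
+ * (inactive lanes come from the extra leading argument), and _x the
+ * "don't care" predicated form (inactive lanes undefined).
+ *
+ *   int32x4_t a = __arm_vdupq_n_s32(8);
+ *   int32x4_t half = __arm_vshlq(a, __arm_vdupq_n_s32(-1)); // a >> 1
+ *   int32x4_t quad = __arm_vshlq_n(a, 2);                   // a << 2
+ *   mve_pred16_t p = __arm_vctp32q(2);      // predicate covering lanes 0,1
+ *   int32x4_t two = __arm_vshlq_x_n(a, 2, p);   // lanes 2,3 undefined
+ */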
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int);
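+/*
+ * Usage sketch (illustrative; assumes an MVE target): vshrnbq/vshrntq shift
+ * right and narrow. Each element of the wide second operand is shifted
+ * right by the immediate and truncated to half width, then written to the
+ * even-numbered (vshrnbq) or odd-numbered (vshrntq) lanes of the first
+ * operand; the remaining lanes pass through, so the two calls pair up:
+ *
+ *   int16x8_t lo = __arm_vdupq_n_s16(0x7F0);
+ *   int16x8_t hi = __arm_vdupq_n_s16(0x120);
+ *   int8x16_t packed = __arm_vdupq_n_s8(0);
+ *   packed = __arm_vshrnbq(packed, lo, 4);   // even lanes <- 0x7F
+ *   packed = __arm_vshrntq(packed, hi, 4);   // odd lanes  <- 0x12
+ */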
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t __arm_vshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t __arm_vshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t __arm_vshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t __arm_vshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t __arm_vshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t __arm_vshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t __arm_vshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t __arm_vshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t __arm_vshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t);
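+/*
+ * Usage sketch (illustrative; assumes an MVE target): vshrq is the plain
+ * immediate right shift, arithmetic for signed element types and logical
+ * for unsigned ones, with the same _m/_x predication pattern as above:
+ *
+ *   uint16x8_t u = __arm_vdupq_n_u16(0x8000);
+ *   uint16x8_t r = __arm_vshrq(u, 3);   // logical shift: 0x1000 per lane
+ */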
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t __arm_vsliq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t __arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t __arm_vsriq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int);
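+/*
+ * Usage sketch (illustrative; assumes an MVE target): vsliq/vsriq are the
+ * shift-and-insert operations. vsliq shifts the second operand left by the
+ * immediate and inserts it into the first, keeping the low bits of each
+ * destination element; vsriq shifts right and keeps the high bits instead.
+ * This merges bit fields without separate mask-and-or steps:
+ *
+ *   uint8x16_t dst = __arm_vdupq_n_u8(0x0F);
+ *   uint8x16_t src = __arm_vdupq_n_u8(0x05);
+ *   uint8x16_t r = __arm_vsliq(dst, src, 4);   // (0x05 << 4) | 0x0F = 0x5F
+ */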
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q(uint8_t *, uint8x16_t);
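+/*
+ * Usage sketch (illustrative; assumes an MVE target): vst1q is the plain
+ * contiguous store, and the _p forms store only the lanes whose predicate
+ * bits are set, which makes loop tails straightforward:
+ *
+ *   void store_tail(int32_t *dst, int32x4_t v, unsigned remaining) {
+ *       mve_pred16_t p = __arm_vctp32q(remaining); // enable first lanes
+ *       __arm_vst1q_p(dst, v, p);                  // other lanes untouched
+ *   }
+ */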
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q(uint8_t *, uint8x16x4_t);
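+/*
+ * Usage sketch (illustrative; assumes an MVE target): vst2q/vst4q take the
+ * x2_t/x4_t tuple types and store their registers interleaved, the usual
+ * way to write structure-of-arrays data back as array-of-structures:
+ *
+ *   int16_t buf[16];
+ *   int16x8x2_t pair;
+ *   pair.val[0] = __arm_vdupq_n_s16(1);   // e.g. real parts
+ *   pair.val[1] = __arm_vdupq_n_s16(2);   // e.g. imaginary parts
+ *   __arm_vst2q(buf, pair);               // buf: 1,2,1,2,...
+ */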
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq(uint8_t *, uint8x16_t);
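+/*
+ * Usage sketch (illustrative; assumes an MVE target): vstrbq stores each
+ * element as a single byte, truncating the wider s16/s32/u16/u32 element
+ * types on the way out; the scatter_offset forms store element i at
+ * base[offset[i]] rather than contiguously:
+ *
+ *   uint8_t table[64];
+ *   uint32x4_t idx = { 0, 5, 9, 33 };            // byte offsets
+ *   uint32x4_t vals = __arm_vdupq_n_u32(0xAB);   // low byte is stored
+ *   __arm_vstrbq_scatter_offset(table, idx, vals);
+ */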
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t __arm_vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t __arm_vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t __arm_vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t __arm_vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t __arm_vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t __arm_vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t __arm_vsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t __arm_vsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t __arm_vsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t __arm_vsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t __arm_vsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t __arm_vsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t __arm_vsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t __arm_vsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t __arm_vsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t __arm_vsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t __arm_vsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t __arm_vsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t __arm_vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t __arm_vsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t __arm_vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t __arm_vsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t __arm_vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t __arm_vsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t __arm_vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t __arm_vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t __arm_vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t __arm_vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t __arm_vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t __arm_vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t __arm_vuninitializedq(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t __arm_vuninitializedq(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t __arm_vuninitializedq(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t __arm_vuninitializedq(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t __arm_vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t __arm_vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t __arm_vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t __arm_vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t __arm_vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t __arm_vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t __arm_vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t __arm_vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t __arm_vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t __arm_vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t __arm_vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t __arm_vuninitializedq_u8();
+
+#if (__ARM_FEATURE_MVE & 2)
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
+typedef struct { float16x8_t val[2]; } float16x8x2_t;
+typedef struct { float16x8_t val[4]; } float16x8x4_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;
+typedef struct { float32x4_t val[2]; } float32x4x2_t;
+typedef struct { float32x4_t val[4]; } float32x4x4_t;
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t __arm_vabdq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t __arm_vabdq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t __arm_vabsq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t __arm_vabsq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t __arm_vabsq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t __arm_vabsq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t __arm_vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t __arm_vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t __arm_vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t __arm_vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t __arm_vabsq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t __arm_vabsq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t __arm_vabsq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t __arm_vabsq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t __arm_vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t __arm_vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t __arm_vaddq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t __arm_vaddq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t __arm_vaddq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t __arm_vaddq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t __arm_vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t __arm_vaddq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t __arm_vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t __arm_vaddq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t __arm_vandq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t __arm_vandq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t __arm_vbicq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t __arm_vbicq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t __arm_vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t __arm_vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t __arm_vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t __arm_vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t __arm_vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t __arm_vbrsrq_n_f16(float16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t __arm_vbrsrq(float16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t __arm_vbrsrq_n_f32(float32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t __arm_vbrsrq(float32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t __arm_vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t __arm_vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t __arm_vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t __arm_vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t __arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t __arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
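+/*
+ * Illustrative sketch: vcmlaq accumulates a complex partial product into its
+ * first operand, with the second multiplicand rotated by 0/90/180/270
+ * degrees.  Pairing the rot0 and rot90 forms composes a full complex
+ * multiply-accumulate over (re, im) lane pairs:
+ *
+ *   acc = __arm_vcmlaq(acc, a, b);        // terms from a.re: re*re, re*im
+ *   acc = __arm_vcmlaq_rot90(acc, a, b);  // terms from a.im: -im*im, im*re
+ */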
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
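+/*
+ * Illustrative note: the vcmp*q intrinsics above return an mve_pred16_t, a
+ * 16-bit mask with one bit per byte of the 128-bit vector; the _n forms
+ * compare each lane against a scalar.  The result can feed any _m/_x
+ * predicated intrinsic, e.g. operating only on lanes where a > b:
+ *
+ *   mve_pred16_t p = __arm_vcmpgtq(a, b);
+ *   float32x4_t r = __arm_veorq_x(a, b, p);  // false lanes unspecified
+ */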
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t __arm_vcmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t __arm_vcmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t __arm_vcmulq_rot180_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t __arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
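+/*
+ * Illustrative sketch: vcmulq multiplies complex lane pairs with the second
+ * operand rotated; combining the rot0 multiply with a rot90 accumulate is
+ * the usual VCMUL/VCMLA idiom for a full complex product:
+ *
+ *   float16x8_t prod = __arm_vcmulq(a, b);      // partial product from a.re
+ *   prod = __arm_vcmlaq_rot90(prod, a, b);      // add the a.im cross terms
+ */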
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
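+/*
+ * Illustrative sketch: vcreateq assembles a 128-bit vector from two uint64_t
+ * halves (first argument supplying the low 64 bits), reinterpreted as float
+ * lanes:
+ *
+ *   float32x4_t v = __arm_vcreateq_f32(0x3f8000003f800000ULL,  // lanes 0-1 = 1.0f
+ *                                      0x4000000040000000ULL); // lanes 2-3 = 2.0f
+ */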
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t __arm_vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t __arm_vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t __arm_vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t __arm_vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t __arm_vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t __arm_vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t __arm_vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t __arm_vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))
+int16x8_t __arm_vcvtaq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))
+int32x4_t __arm_vcvtaq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))
+uint16x8_t __arm_vcvtaq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))
+uint32x4_t __arm_vcvtaq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))
+int16x8_t __arm_vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))
+int32x4_t __arm_vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))
+uint16x8_t __arm_vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))
+uint32x4_t __arm_vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))
+float32x4_t __arm_vcvtbq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))
+float32x4_t __arm_vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))
+float32x4_t __arm_vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t __arm_vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t __arm_vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t __arm_vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t __arm_vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t __arm_vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t __arm_vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t __arm_vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t __arm_vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))
+int16x8_t __arm_vcvtmq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))
+int32x4_t __arm_vcvtmq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))
+uint16x8_t __arm_vcvtmq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))
+uint32x4_t __arm_vcvtmq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))
+int16x8_t __arm_vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))
+int32x4_t __arm_vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))
+uint16x8_t __arm_vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))
+uint32x4_t __arm_vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t __arm_vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t __arm_vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t __arm_vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t __arm_vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t __arm_vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t __arm_vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t __arm_vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t __arm_vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))
+int16x8_t __arm_vcvtnq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))
+int32x4_t __arm_vcvtnq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))
+uint16x8_t __arm_vcvtnq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))
+uint32x4_t __arm_vcvtnq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))
+int16x8_t __arm_vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))
+int32x4_t __arm_vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))
+uint16x8_t __arm_vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))
+uint32x4_t __arm_vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t __arm_vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t __arm_vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t __arm_vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t __arm_vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t __arm_vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t __arm_vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t __arm_vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t __arm_vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))
+int16x8_t __arm_vcvtpq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))
+int32x4_t __arm_vcvtpq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))
+uint16x8_t __arm_vcvtpq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))
+uint32x4_t __arm_vcvtpq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))
+int16x8_t __arm_vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))
+int32x4_t __arm_vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))
+uint16x8_t __arm_vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))
+uint32x4_t __arm_vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);
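+/*
+ * Illustrative note: the vcvt{a,m,n,p}q families above convert float lanes to
+ * integers with an explicit rounding mode: a = to nearest, ties away from
+ * zero; m = toward minus infinity; n = to nearest, ties to even; p = toward
+ * plus infinity.  E.g. a floor()-style conversion:
+ *
+ *   int32x4_t f = __arm_vcvtmq_s32_f32(x);
+ */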
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t __arm_vcvtq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t __arm_vcvtq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t __arm_vcvtq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t __arm_vcvtq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t __arm_vcvtq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t __arm_vcvtq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t __arm_vcvtq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t __arm_vcvtq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t __arm_vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t __arm_vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t __arm_vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t __arm_vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t __arm_vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t __arm_vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t __arm_vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t __arm_vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t __arm_vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t __arm_vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t __arm_vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t __arm_vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t __arm_vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t __arm_vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t __arm_vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t __arm_vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t __arm_vcvtq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t __arm_vcvtq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t __arm_vcvtq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t __arm_vcvtq_n_f32_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t __arm_vcvtq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
+int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
+int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
+uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
+uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))
+int16x8_t __arm_vcvtq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))
+int32x4_t __arm_vcvtq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))
+uint16x8_t __arm_vcvtq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))
+uint32x4_t __arm_vcvtq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t __arm_vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t __arm_vcvtq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t __arm_vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t __arm_vcvtq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t __arm_vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t __arm_vcvtq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t __arm_vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t __arm_vcvtq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t __arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
+int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
+int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
+uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
+uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))
+int16x8_t __arm_vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))
+int32x4_t __arm_vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))
+uint16x8_t __arm_vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))
+uint32x4_t __arm_vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);
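+/*
+ * Illustrative sketch: the vcvtq_n forms take a compile-time fractional-bit
+ * count (1..16 for 16-bit lanes, 1..32 for 32-bit), giving fixed-point
+ * conversions, e.g. Q15 to half precision:
+ *
+ *   float16x8_t f = __arm_vcvtq_n_f16_s16(q15, 15);  // f = q15 / 32768.0
+ */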
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))
+float32x4_t __arm_vcvttq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))
+float32x4_t __arm_vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))
+float32x4_t __arm_vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);
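+/*
+ * Illustrative note: vcvtbq/vcvttq convert between float32x4_t and the
+ * bottom (even-numbered) / top (odd-numbered) f16 lanes, so two f32 vectors
+ * pack into one float16x8_t:
+ *
+ *   packed = __arm_vcvtbq_f16_f32(packed, lo4);  // fill even lanes
+ *   packed = __arm_vcvttq_f16_f32(packed, hi4);  // fill odd lanes
+ */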
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))
+float16x8_t __arm_vdupq_n_f16(float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))
+float32x4_t __arm_vdupq_n_f32(float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))
+float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))
+float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t);
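+/*
+ * Illustrative note: vdupq_n broadcasts a scalar to every lane; the _m/_x
+ * forms broadcast under predication:
+ *
+ *   float32x4_t ones = __arm_vdupq_n_f32(1.0f);
+ */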
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t __arm_veorq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t __arm_veorq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t __arm_vfmaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t __arm_vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t __arm_vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t __arm_vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t __arm_vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t __arm_vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t __arm_vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t __arm_vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t __arm_vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t __arm_vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t __arm_vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t __arm_vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t __arm_vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t __arm_vfmasq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t __arm_vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t __arm_vfmasq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t __arm_vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t __arm_vfmsq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t __arm_vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t __arm_vfmsq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t __arm_vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t __arm_vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t __arm_vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t __arm_vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
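+/*
+ * vfmaq/vfmasq/vfmsq are the MVE fused multiply-accumulate intrinsics:
+ * vfmaq(a, b, c) computes a + b*c, vfmasq(a, b, n) computes a*b + n for a
+ * scalar n, and vfmsq(a, b, c) computes a - b*c.  A minimal usage sketch,
+ * assuming an MVE-FP target (e.g. -march=armv8.1-m.main+mve.fp):
+ *
+ *   float32x4_t fma_masked(float32x4_t acc, float32x4_t x, float32x4_t y,
+ *                          mve_pred16_t p) {
+ *       // acc + x*y in lanes where p is set; inactive lanes keep acc
+ *       return __arm_vfmaq_m(acc, x, y, p);
+ *   }
+ */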
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q(const float32_t *);
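+/*
+ * vld1q loads one contiguous vector; vld2q and vld4q perform de-interleaving
+ * loads of two or four vectors from consecutive memory, e.g. splitting
+ * interleaved stereo samples into separate channels:
+ *
+ *   float32x4x2_t ch = __arm_vld2q(buf);          // buf: const float32_t *
+ *   float32x4_t left = ch.val[0], right = ch.val[1];
+ */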
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t __arm_vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t __arm_vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
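+/*
+ * The vldrhq/vldrwq gather loads fetch one element per lane from
+ * base + offset.  The gather_offset forms take per-lane byte offsets; the
+ * gather_shifted_offset forms scale the offsets by the element size, so
+ * they act as per-lane element indices.  The _z variants zero the lanes
+ * that are inactive under the predicate.
+ */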
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t __arm_vmaxnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t __arm_vmaxnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t __arm_vmaxnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t __arm_vmaxnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t __arm_vmaxnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t __arm_vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t __arm_vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t __arm_vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t __arm_vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t __arm_vmaxnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t __arm_vmaxnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t __arm_vmaxnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t __arm_vmaxnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t __arm_vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t __arm_vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t __arm_vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t __arm_vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t __arm_vminnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t __arm_vminnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t __arm_vminnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t __arm_vminnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t __arm_vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t __arm_vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t __arm_vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t __arm_vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t __arm_vminnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t __arm_vminnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t __arm_vminnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t __arm_vminnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t __arm_vminnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t __arm_vminnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t __arm_vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t __arm_vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t __arm_vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t __arm_vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);
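+/*
+ * vmaxnmq/vminnmq implement IEEE 754 maxNum/minNum semantics: when exactly
+ * one operand lane is NaN, the numeric operand is returned.  The
+ * vmaxnmaq/vminnmaq forms compare absolute values, while vmaxnmvq/vmaxnmavq
+ * (and their vminnm counterparts) reduce across the vector, folding the
+ * scalar first argument into the result; the _p forms predicate the
+ * reduction.
+ */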
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t __arm_vmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t __arm_vmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t __arm_vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t __arm_vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t __arm_vmulq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t __arm_vmulq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t __arm_vmulq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t __arm_vmulq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t __arm_vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t __arm_vmulq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t __arm_vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t __arm_vmulq_x(float32x4_t, float32_t, mve_pred16_t);
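+/*
+ * Suffix conventions used throughout this header: _n takes a scalar in
+ * place of the last vector operand, _m merges (inactive lanes come from the
+ * extra "inactive" first argument), _x leaves inactive lanes undefined, and
+ * _p predicates a scalar reduction.  For example, with the vmulq family
+ * declared above:
+ *
+ *   // x * 2.0f in active lanes; inactive lane values are undefined
+ *   float32x4_t y = __arm_vmulq_x_n_f32(x, 2.0f, p);
+ */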
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t __arm_vnegq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t __arm_vnegq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t __arm_vnegq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t __arm_vnegq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t __arm_vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t __arm_vnegq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t __arm_vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t __arm_vnegq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t __arm_vnegq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t __arm_vnegq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t __arm_vnegq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t __arm_vnegq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t __arm_vornq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t __arm_vornq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t __arm_vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t __arm_vorrq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t __arm_vorrq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
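+/*
+ * vreinterpretq performs a bit-level reinterpretation between 128-bit
+ * vector types; no conversion instructions are emitted.  Sketch:
+ *
+ *   uint32x4_t bits = __arm_vreinterpretq_u32_f32(v);  // raw IEEE bits of v
+ */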
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t __arm_vrev32q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t __arm_vrev32q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t __arm_vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t __arm_vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t __arm_vrev32q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t __arm_vrev32q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t __arm_vrev64q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t __arm_vrev64q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t __arm_vrev64q_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t __arm_vrev64q(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t __arm_vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t __arm_vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t __arm_vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t __arm_vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t __arm_vrev64q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t __arm_vrev64q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t __arm_vrev64q_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t __arm_vrev64q_x(float32x4_t, mve_pred16_t);
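+/*
+ * The rounding family below maps to the VRINT instructions: vrndaq rounds
+ * to nearest with ties away from zero, vrndmq toward minus infinity
+ * (floor), vrndnq to nearest with ties to even, vrndpq toward plus
+ * infinity (ceil), and vrndq toward zero (truncate).
+ */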
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t __arm_vrndaq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t __arm_vrndaq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t __arm_vrndaq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t __arm_vrndaq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t __arm_vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t __arm_vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t __arm_vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t __arm_vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t __arm_vrndaq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t __arm_vrndaq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t __arm_vrndaq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t __arm_vrndaq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t __arm_vrndmq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t __arm_vrndmq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t __arm_vrndmq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t __arm_vrndmq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t __arm_vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t __arm_vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t __arm_vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t __arm_vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t __arm_vrndmq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t __arm_vrndmq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t __arm_vrndmq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t __arm_vrndmq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t __arm_vrndnq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t __arm_vrndnq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t __arm_vrndnq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t __arm_vrndnq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t __arm_vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t __arm_vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t __arm_vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t __arm_vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t __arm_vrndnq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t __arm_vrndnq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t __arm_vrndnq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t __arm_vrndnq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t __arm_vrndpq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t __arm_vrndpq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t __arm_vrndpq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t __arm_vrndpq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t __arm_vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t __arm_vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t __arm_vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t __arm_vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t __arm_vrndpq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t __arm_vrndpq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t __arm_vrndpq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t __arm_vrndpq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t __arm_vrndq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t __arm_vrndq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t __arm_vrndq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t __arm_vrndq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t __arm_vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t __arm_vrndq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t __arm_vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t __arm_vrndq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t __arm_vrndq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t __arm_vrndq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t __arm_vrndq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t __arm_vrndq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t __arm_vrndxq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t __arm_vrndxq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t __arm_vrndxq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t __arm_vrndxq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t __arm_vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t __arm_vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t __arm_vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t __arm_vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t __arm_vrndxq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t __arm_vrndxq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t __arm_vrndxq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t __arm_vrndxq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t __arm_vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t __arm_vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t __arm_vsubq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t __arm_vsubq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t __arm_vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t __arm_vsubq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t __arm_vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t __arm_vsubq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t __arm_vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t __arm_vuninitializedq_f32();
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t __arm_vuninitializedq(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t __arm_vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) */
+
+#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl)))
+int64_t asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll)))
+uint64_t lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr)))
+int32_t sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl)))
+int64_t sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl)))
+int32_t sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll)))
+int64_t sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr)))
+int32_t srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl)))
+int64_t srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl)))
+uint32_t uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll)))
+uint64_t uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl)))
+uint32_t uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll)))
+uint64_t uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr)))
+uint32_t urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl)))
+uint64_t urshrl(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t vabavq(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t vabavq_s32(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t vabavq(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t vabavq(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t vabdq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t vabdq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t vabdq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t vabdq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t vabdq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t vabdq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t vabdq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t vabdq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t vabdq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t vabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t vabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t vabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t vabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t vabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t vabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t vabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t vabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t vabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t vabsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t vabsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t vabsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t vabsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t vabsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t vabsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t vaddlvaq_s32(int64_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t vaddlvaq(int64_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t vaddlvaq_u32(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t vaddlvaq(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t vaddlvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t vaddlvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t vaddlvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t vaddlvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t vaddlvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t vaddlvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t vaddlvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t vaddlvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t vaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t vaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t vaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t vaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t vaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t vaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t vaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t vaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t vaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t vaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t vaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t vaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t vaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t vaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t vaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t vaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t vaddvaq_s16(int32_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t vaddvaq(int32_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t vaddvaq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t vaddvaq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t vaddvaq_s8(int32_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t vaddvaq(int32_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t vaddvaq_u16(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t vaddvaq(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t vaddvaq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t vaddvaq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t vaddvaq_u8(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t vaddvaq(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t vaddvq_p_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t vaddvq_p(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t vaddvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t vaddvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t vaddvq_p_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t vaddvq_p(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t vaddvq_p_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t vaddvq_p(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t vaddvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t vaddvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t vaddvq_p_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t vaddvq_p(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t vaddvq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t vaddvq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t vaddvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t vaddvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t vaddvq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t vaddvq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t vaddvq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t vaddvq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t vaddvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t vaddvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t vaddvq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t vaddvq(uint8x16_t);
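+// vandq: lane-wise bitwise AND. In the _m (merging) forms the first argument
+// supplies the value of predicated-off lanes; in the _x (don't-care) forms
+// those lanes are left undefined, which gives the compiler more freedom.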
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t vandq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t vandq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t vandq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t vandq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t vandq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t vandq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t vandq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t vandq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t vandq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t vandq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t vandq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t vandq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
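+// vbicq: bit clear, i.e. a & ~b for each lane. The _n forms take a scalar
+// second operand (the VBIC immediate encoding), which is why they are only
+// provided for 16- and 32-bit element widths.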
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t vbicq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t vbicq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t vbicq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t vbicq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t vbicq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t vbicq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t vbicq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t vbicq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t vbicq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t vbicq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t vbicq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t vbicq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t vbicq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t vbicq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t vbicq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
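+// vbrsrq maps to the VBRSR (vector bit reverse and shift right) instruction;
+// the int32_t operand is taken from a general-purpose register, which is why
+// every variant carries the _n marker. See the Armv8.1-M MVE reference for
+// the exact per-lane semantics.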
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t vbrsrq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t vbrsrq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t vbrsrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t vbrsrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t vbrsrq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t vbrsrq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t vbrsrq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t vbrsrq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t vbrsrq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t vbrsrq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t vbrsrq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t vbrsrq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);
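+// vcaddq_rot90 / vcaddq_rot270: complex addition. Adjacent lane pairs are
+// treated as (real, imaginary) components, and the second operand is rotated
+// by 90 or 270 degrees in the complex plane before the add.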
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t vcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t vcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t vcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t vcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t vcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t vcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t vcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
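+// vclsq counts the leading sign bits of each lane (the run of bits matching
+// the sign bit, excluding the sign bit itself); it is defined for signed
+// element types only, so no _u variants appear.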
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t vclsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t vclsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t vclsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t vclsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t vclsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t vclsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t vclsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t vclsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t vclsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t vclsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t vclsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t vclsq_x(int8x16_t, mve_pred16_t);
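+// vclzq counts the leading zero bits of each lane and is available for both
+// signed and unsigned element types.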
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t vclzq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t vclzq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t vclzq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t vclzq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t vclzq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t vclzq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t vclzq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t vclzq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t vclzq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t vclzq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t vclzq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t vclzq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t vclzq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t vclzq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t vclzq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t vclzq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t vclzq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t vclzq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t vclzq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t vclzq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t vclzq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t vclzq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t vclzq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t vclzq_x(uint8x16_t, mve_pred16_t);
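+// The vcmp* families return an mve_pred16_t with one bit per byte of the
+// compared vectors (so 16-bit lanes set two bits each, 32-bit lanes four).
+// vcmpcsq is the unsigned higher-or-same ("carry set") comparison, hence it
+// exists only for unsigned element types.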
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
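+// vcmpeqq: lane-wise equality, for signed and unsigned elements alike. The
+// _n forms compare every lane against a scalar; in the _m forms only lanes
+// already active in the input predicate can set bits in the result.
+// Example (illustrative): mve_pred16_t p = vcmpeqq(a, b);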
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
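+// vcmpgeq: signed greater-than-or-equal comparison; the unsigned counterpart
+// is vcmpcsq above, which is why no _u variants are declared here.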
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
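+// vcmpgtq: signed greater-than comparison (unsigned code uses vcmphiq).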
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))
+mve_pred16_t vctp16q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))
+mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))
+mve_pred16_t vctp32q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))
+mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))
+mve_pred16_t vctp64q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))
+mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))
+mve_pred16_t vctp8q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))
+mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t);
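+/*
+ * The vctpNq family above builds a predicate whose first `a` lanes are
+ * true, which is the usual way to mask the final partial iteration of a
+ * vectorized loop. A minimal sketch (loop shape assumed, not taken from
+ * the upstream header):
+ *
+ *   for (uint32_t i = 0; i < n; i += 4) {
+ *     mve_pred16_t p = vctp32q(n - i); // all-true until the tail
+ *     // ... predicated 32-bit operations guarded by p ...
+ *   }
+ */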
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t vddupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t vddupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t vddupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t vddupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t vddupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t vddupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t vddupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t vddupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t vddupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t vddupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t vddupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t vddupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t vddupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))
+int16x8_t vdupq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))
+int32x4_t vdupq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))
+int8x16_t vdupq_n_s8(int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))
+uint16x8_t vdupq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))
+uint32x4_t vdupq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))
+uint8x16_t vdupq_n_u8(uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))
+int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))
+int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))
+int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))
+uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))
+uint32x4_t vdupq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))
+uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
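+/*
+ * In the vddupq/vdwdupq families above, the _n forms take the start
+ * value by value while the _wb forms take a pointer and write the
+ * updated start value back, so consecutive calls continue the sequence.
+ * A minimal sketch with illustrative values (not from the upstream
+ * header):
+ *
+ *   uint32_t base = 16;
+ *   uint16x8_t v0 = vddupq_wb_u16(&base, 1); // {16,15,...,9}; base -> 8
+ *   uint16x8_t v1 = vddupq_wb_u16(&base, 1); // {8,7,...,1};   base -> 0
+ */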
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t veorq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t veorq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t veorq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t veorq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t veorq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t veorq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t veorq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t veorq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t veorq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t veorq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t veorq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t veorq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t vhaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t vhaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t vhaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t vhaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t vhaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t vhaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t vhaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t vhaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t vhaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t vhaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t vhaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t vhaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t vhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t vhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t vhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t vhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t vhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t vhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t vhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t vhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t vhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t vhaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t vhaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t vhaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
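+/* Halving add (VHADD): each result lane is (a[i] + b[i]) >> 1, computed in a
+ * wider intermediate so it cannot overflow. The _n_ forms add a scalar to
+ * every lane; _m_ forms are predicated and merge inactive lanes from the
+ * first (inactive) argument; _x_ forms leave inactive lanes undefined.
+ * Hand-written usage sketch, assuming an MVE-enabled target (not part of the
+ * generated declarations):
+ *   int16x8_t avg = vhaddq(a, b);                 // lane-wise (a+b)/2
+ *   int16x8_t sel = vhaddq_m(inactive, a, b, p);  // merging predicated form
+ */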
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t vhcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
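+/* Halving complex add (VHCADD): adjacent lane pairs are treated as (real,
+ * imaginary) complex values, the second operand is rotated by 90 or 270
+ * degrees in the complex plane, and the halved sum is returned. Sketch,
+ * assuming an MVE target:
+ *   int16x8_t r = vhcaddq_rot90(a, b);   // per pair: (a + i*b) / 2
+ */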
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t vhsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t vhsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t vhsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t vhsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t vhsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t vhsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t vhsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t vhsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t vhsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t vhsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t vhsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t vhsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t vhsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t vhsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t vhsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t vhsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t vhsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t vhsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t vhsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t vhsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t vhsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t vhsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t vhsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t vhsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
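+/* Halving subtract (VHSUB): each result lane is (a[i] - b[i]) >> 1, with the
+ * same _n_/_m_/_x_ variant scheme as vhaddq above. Sketch, assuming an MVE
+ * target:
+ *   uint8x16_t d = vhsubq(a, b);   // lane-wise (a-b)/2 without overflow
+ */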
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t vidupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t vidupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t vidupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t vidupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t vidupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t vidupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t vidupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t vidupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t vidupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t vidupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t vidupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t vidupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t vidupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t);
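+/* Incrementing dup (VIDUP): returns {base, base+imm, base+2*imm, ...}, where
+ * imm must be a compile-time constant in {1, 2, 4, 8}. The _wb (writeback)
+ * forms take a pointer to the base and store the post-incremented value back
+ * through it. Hand-written sketch, assuming an MVE target:
+ *   uint32_t base = 0;
+ *   uint16x8_t idx = vidupq_u16(&base, 1);   // {0,1,...,7}; base becomes 8
+ */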
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t viwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t viwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t viwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t viwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
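+/* Incrementing-with-wrap dup (VIWDUP): like vidupq, but the counter wraps to
+ * zero on reaching the wrap argument, which suits circular-buffer indexing.
+ * Sketch, assuming an MVE target:
+ *   uint16x8_t idx = viwdupq_u16(6, 8, 1);   // {6,7,0,1,2,3,4,5}
+ */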
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
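+/* Contiguous loads: vld1q reads one full vector; the _z (zeroing) forms are
+ * predicated, loading active lanes and zeroing the rest, which is the usual
+ * tail-handling pattern. Sketch, assuming an MVE target; vctp16q (declared
+ * elsewhere in this header) builds a predicate covering the first n lanes:
+ *   mve_pred16_t p = vctp16q(n);
+ *   int16x8_t v = vld1q_z(src, p);   // lanes >= n are zeroed
+ */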
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q(const uint8_t *);
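+/* De-interleaving structure loads: vld2q/vld4q read two or four vectors'
+ * worth of interleaved elements and split them into the members of the
+ * returned x2/x4 struct (e.g. stereo samples or RGBA channels). Sketch,
+ * assuming an MVE target:
+ *   int16x8x2_t lr = vld2q(samples);   // lr.val[0] = even, lr.val[1] = odd
+ */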
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);
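+/* Byte loads (VLDRB): the s16/s32/u16/u32 variants sign- or zero-extend each
+ * loaded byte to the lane width; gather_offset forms fetch base[offset[i]]
+ * per lane using unscaled byte offsets, and _z forms zero inactive lanes.
+ * Sketch, assuming an MVE target:
+ *   uint16x8_t off = vidupq_n_u16(0, 2);              // {0,2,4,...,14}
+ *   int16x8_t v = vldrbq_gather_offset(src, off);     // widening byte gather
+ */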
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
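+/* Doubleword gathers (VLDRD): gather_base forms take a vector of absolute
+ * addresses plus an immediate byte offset, with _wb variants writing the
+ * updated address vector back; gather_offset uses byte offsets, while
+ * gather_shifted_offset scales each offset by 8 so offsets index elements.
+ * Sketch, assuming an MVE target (vcreateq_u64 is declared elsewhere in this
+ * header):
+ *   uint64x2_t idx = vcreateq_u64(0, 1);                   // lanes {0, 1}
+ *   int64x2_t v = vldrdq_gather_shifted_offset(src, idx);  // src[0], src[1]
+ */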
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);
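+/* Word gather loads. The gather_base forms take a vector of 32-bit base
+ * addresses plus an immediate offset; the _wb (write-back) forms take that
+ * base vector by pointer and update it in place after the load. The
+ * gather_offset and gather_shifted_offset forms instead take a scalar base
+ * pointer and a vector of offsets, with the shifted forms scaling each
+ * offset by the element size. */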
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);
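+/* vmaxaq (VMAXA): per-lane maximum of an unsigned accumulator vector and
+ * the absolute value of a signed vector; the _m forms update only the
+ * predicated-on lanes, leaving the rest of the accumulator unchanged. */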
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t vmaxaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t vmaxaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t vmaxaq(uint8x16_t, int8x16_t);
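+/* vmaxavq (VMAXAV): reduce a signed vector to the maximum of the absolute
+ * values of its lanes, folded with the unsigned scalar argument; the _p
+ * forms consider only predicated-on lanes. */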
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t vmaxavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t vmaxavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t vmaxavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t vmaxavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t vmaxavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t vmaxavq(uint8_t, int8x16_t);
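+/* vmaxq: per-lane maximum. The _m (merging) forms take an extra leading
+ * "inactive" vector that supplies the result in predicated-off lanes; the
+ * _x forms further down take only a predicate and leave those lanes
+ * undefined. */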
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t vmaxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t vmaxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t vmaxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t vmaxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t vmaxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t vmaxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t vmaxq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t vmaxq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t vmaxq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
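+/* vmaxvq (VMAXV): across-vector maximum folded with a scalar of the element
+ * type; unlike vmaxavq there is no absolute value taken, so signedness is
+ * preserved and unsigned variants exist. */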
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t vmaxvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t vmaxvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t vmaxvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq(uint8_t, uint8x16_t);
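+/* The vmin* declarations below mirror the vmax* family exactly:
+ * vminaq/vminavq operate on absolute values, vminq is the per-lane minimum,
+ * and vminvq is the across-vector reduction. */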
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t vminaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t vminaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t vminaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t vminaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t vminaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t vminaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t vminavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t vminavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t vminavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t vminavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t vminavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t vminavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t vminavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t vminavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t vminavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t vminq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t vminq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t vminq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t vminq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t vminq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t vminq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t vminq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t vminq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t vminq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t vminq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t vminq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t vminq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t vminvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t vminvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t vminvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t vminvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t vminvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t vminvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq(uint8_t, uint8x16_t);
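+/* vmladav (VMLADAV): multiply corresponding lanes and accumulate the
+ * products across the vector into a 32-bit scalar. The "a" variants add an
+ * initial accumulator argument, the _p forms are predicated, and the "x"
+ * (exchange) variants multiply against exchanged even/odd lane pairs of
+ * the second operand. */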
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t vmladavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t vmladavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t vmladavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t vmladavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t vmladavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t vmladavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t vmladavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t vmladavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t vmladavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t vmladavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t vmladavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t vmladavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t vmladavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t vmladavq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t vmladavq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t vmladavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t vmladavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t vmladavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t vmladavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t vmladavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t vmladavxq(int8x16_t, int8x16_t);
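+/* vmlaldav (VMLALDAV): the long form of vmladav, accumulating into a 64-bit
+ * scalar; it is defined only for 16- and 32-bit elements, which is why no
+ * _s8 or _u8 declarations appear below. */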
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t vmlaldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t vmlaldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t vmlaldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t vmlaldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t vmlaldavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t vmlaldavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t vmlaldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t vmlaldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t vmlaldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t vmlaldavxq(int32x4_t, int32x4_t);
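+/* vmlaq (VMLA): per-lane multiply-accumulate with a broadcast scalar,
+ * computing add + m1 * b in each lane; the _n in the suffixed names marks
+ * the scalar operand. */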
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t vmlaq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t vmlaq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t vmlaq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t vmlaq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t vmlaq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t vmlaq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t vmlaq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t vmlaq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t vmlaq(uint8x16_t, uint8x16_t, uint8_t);
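+/* vmlasq (VMLAS): per-lane multiply with the broadcast scalar added last,
+ * computing m1 * m2 + add in each lane; here the scalar argument is the
+ * addend rather than a multiplicand. */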
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t vmlasq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t vmlasq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t vmlasq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t vmlasq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t vmlasq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t vmlasq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t vmlasq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t vmlasq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t vmlasq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t vmlsdavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t vmlsdavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t vmlsdavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t vmlsdavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t vmlsdavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t vmlsdavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t vmlsdavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t vmlsdavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t vmlsdavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t vmlsdavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t vmlsdavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t vmlsdavxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t vmlsldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t vmlsldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t vmlsldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t vmlsldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t vmlsldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t vmlsldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t vmlsldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t vmlsldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t vmovlbq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t vmovlbq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t vmovlbq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t vmovlbq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t vmovlbq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t vmovlbq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t vmovlbq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t vmovlbq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t vmovlbq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t vmovlbq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t vmovlbq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t vmovlbq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t vmovlbq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t vmovlbq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t vmovlbq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t vmovlbq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t vmovltq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t vmovltq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t vmovltq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t vmovltq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t vmovltq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t vmovltq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t vmovltq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t vmovltq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t vmovltq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t vmovltq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t vmovltq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t vmovltq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t vmovltq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t vmovltq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t vmovltq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t vmovltq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t vmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t vmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t vmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t vmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t vmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t vmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t vmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t vmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t vmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t vmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t vmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t vmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t vmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t vmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t vmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t vmovntq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t vmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t vmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t vmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t vmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t vmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t vmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t vmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t vmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t vmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t vmullbq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t vmullbq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t vmullbq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t vmulltq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t vmulltq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t vmulltq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t vmulltq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t vmulq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t vmulq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t vmulq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t vmulq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t vmulq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t vmulq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t vmulq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t vmulq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t vmulq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t vmulq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t vmulq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t vmulq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t vmulq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t vmulq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t vmulq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t vmulq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t vmulq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t vmulq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t vmulq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t vmulq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t vmulq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t vmulq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t vmulq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t vmulq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))
+int16x8_t vmvnq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))
+int32x4_t vmvnq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))
+uint16x8_t vmvnq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))
+uint32x4_t vmvnq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t vmvnq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t vmvnq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t vmvnq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t vmvnq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t vmvnq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t vmvnq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t vmvnq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t vmvnq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t vmvnq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t vmvnq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t vmvnq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t vmvnq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
+int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
+int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
+uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
+uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t vmvnq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t vmvnq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t vmvnq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t vmvnq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t vmvnq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t vmvnq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t vmvnq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t vmvnq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t vmvnq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t vmvnq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t vmvnq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t vmvnq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t vnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t vnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t vnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t vnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t vnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t vnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t vnegq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t vnegq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t vnegq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t vnegq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t vnegq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t vnegq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t vornq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t vornq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t vornq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t vornq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t vornq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t vornq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t vornq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t vornq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t vornq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t vornq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t vornq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t vornq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t vorrq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t vorrq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t vorrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t vorrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t vorrq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t vorrq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t vorrq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t vorrq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t vorrq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t vorrq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t vorrq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t vorrq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t vorrq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t vorrq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t vorrq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))
+mve_pred16_t vpnot(mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t vqabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t vqabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t vqabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t vqabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t vqabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t vqabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t vqaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t vqaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t vqaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t vqaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t vqaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t vqaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t vqaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t vqaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t vqaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t vqaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t vqaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t vqaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t vqaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t vqaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t vqaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t vqaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t vqaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t vqaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t vqaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t vqaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t vqaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t vqdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t vqdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t vqdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t vqdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t vqdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t vqdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t vqdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t vqdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t vqdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t vqdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t vqdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t vqdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t vqdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t vqdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t vqdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t vqdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t vqdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t vqdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t vqdmullbq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t vqdmullbq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t vqdmullbq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t vqdmullbq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t vqdmullbq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t vqdmullbq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t vqdmullbq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t vqdmullbq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t vqdmulltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t vqdmulltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t vqdmulltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t vqdmulltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t vqdmulltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t vqdmulltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t vqdmulltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t vqdmulltq(int32x4_t, int32x4_t);
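+/* Editorial note -- illustrative: vqdmullbq/vqdmulltq are the widening
+ * forms. They multiply only the bottom (even-numbered) or top
+ * (odd-numbered) lanes and return saturated, doubled products at twice
+ * the element width:
+ *
+ *   int16x8_t a, b;
+ *   int32x4_t lo = vqdmullbq(a, b); // lanes 0,2,4,6: sat(2*a[i]*b[i])
+ *   int32x4_t hi = vqdmulltq(a, b); // lanes 1,3,5,7: sat(2*a[i]*b[i])
+ */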
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t vqmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t vqmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t vqmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t vqmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t vqmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t vqmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t vqmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t vqmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t vqmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t vqmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t vqmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t vqmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t vqmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t vqmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t vqmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t vqmovntq(uint16x8_t, uint32x4_t);
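+/* Editorial note -- illustrative: vqmovnbq/vqmovntq saturate wide lanes to
+ * half width and write only the bottom or top halves of the destination,
+ * so a pair of calls interleaves two wide vectors into one narrow result:
+ *
+ *   int32x4_t even, odd;
+ *   int16x8_t r = vdupq_n_s16(0);
+ *   r = vqmovnbq(r, even); // fill even 16-bit lanes
+ *   r = vqmovntq(r, odd);  // fill odd 16-bit lanes
+ *
+ * The vqmovun* variants below do the same but saturate signed input into
+ * an unsigned result.
+ */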
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t vqmovunbq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t vqmovunbq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t vqmovunbq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t vqmovunbq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t vqmovuntq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t vqmovuntq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t vqmovuntq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t vqmovuntq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t vqnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t vqnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t vqnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t vqnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t vqnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t vqnegq(int8x16_t);
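+/* Editorial note -- illustrative: the saturating unary forms avoid the
+ * wrap-around of plain two's-complement negation; e.g. vqnegq_s16 maps
+ * INT16_MIN (-32768) to INT16_MAX (32767) instead of back to -32768.
+ */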
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t vqrdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t vqrdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t vqrdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t vqrdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t vqrdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t vqrdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t vqrdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t vqrdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t vqrdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t vqrdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t vqrdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t vqrdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t vqrdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t vqrdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t vqrdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t vqrdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t vqrdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t vqrdmulhq(int8x16_t, int8x16_t);
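+/* Editorial note -- illustrative: vqrdmulhq is the rounding counterpart
+ * of vqdmulhq; per lane it computes
+ *   sat((2 * a[i] * b[i] + (1 << (element_bits - 1))) >> element_bits),
+ * i.e. the usual Q15/Q31 fixed-point multiply with round-to-nearest.
+ */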
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t vqrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t vqrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t vqrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t vqrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t vqrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t vqrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t vqrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t vqrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t vqrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t vqrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t vqrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t vqrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t vqrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t vqrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t vqrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t vqrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t vqrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t vqrshlq(uint8x16_t, int8x16_t);
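+/* Editorial note -- illustrative sketch, assuming the usual ACLE
+ * convention that the first argument of an _m intrinsic supplies the
+ * inactive lanes: the mve_pred16_t mask is typically produced by a vctp
+ * intrinsic for loop tails, so only the active lanes are updated:
+ *
+ *   mve_pred16_t p = vctp16q(n);          // first n of 8 lanes active
+ *   acc = vqrshlq_m(acc, acc, shifts, p); // shift active lanes, keep rest
+ */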
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t vqrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t vqrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t vqshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t vqshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t vqshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t vqshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t vqshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t vqshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t vqshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t vqshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t vqshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t vqshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t vqshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t vqshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t vqshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t vqshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t vqshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t vqshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t vqshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t vqshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t vqshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t vqshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t vqshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t vqshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t vqshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t vqshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t vqshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t vqshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t vqshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t vqshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t vqshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t vqshlq(uint8x16_t, int8x16_t);
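+// Each intrinsic in this header comes in two spellings: a type-suffixed
+// declaration (e.g. vqshlq_n_s16) and an __overloadable__ alias under the
+// generic name (vqshlq_n); __clang_arm_builtin_alias routes both to the
+// same builtin. A minimal usage sketch, assuming an MVE-enabled target
+// such as -march=armv8.1-m.main+mve:
+//
+//   int16x8_t v = vdupq_n_s16(3000);
+//   int16x8_t a = vqshlq_n(v, 4);              // resolves to vqshlq_n_s16;
+//                                              // 3000 << 4 saturates to 32767
+//   int16x8_t b = vqshlq(v, vdupq_n_s16(2));   // vector form: per-lane counts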
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t vqshluq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t vqshluq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t vqshluq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t vqshluq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t vqshluq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t vqshluq(int8x16_t, int);
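+// vqshluq_n_* is the signed-in, unsigned-out variant: it left-shifts
+// signed lanes and saturates to the unsigned range of the same width.
+// Sketch, under the same target assumptions as above:
+//
+//   uint16x8_t u = vqshluq(vdupq_n_s16(-5), 1);   // negative lanes clamp to 0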
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t vqshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t vqshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t vqshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t vqshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int);
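+// vqshrnbq/vqshrntq right-shift each wide lane, saturate to the narrow
+// element type, and write the results into the bottom (even) or top (odd)
+// narrow lanes of the first operand, preserving the other half — so a b/t
+// pair packs two wide vectors into one narrow one. Sketch; `lo` and `hi`
+// are int16x8_t values assumed to be defined elsewhere:
+//
+//   int8x16_t d = vdupq_n_s8(0);
+//   d = vqshrnbq(d, lo, 4);   // fills the even int8 lanes from `lo`
+//   d = vqshrntq(d, hi, 4);   // fills the odd int8 lanes from `hi`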
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int);
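+// The vqshrun* forms apply the same b/t narrowing to signed wide input
+// with unsigned saturation on the narrow result, e.g. (`w` assumed to be
+// an int16x8_t):
+//
+//   uint8x16_t r = vqshrunbq(vdupq_n_u8(0), w, 8);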
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t vqsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t vqsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t vqsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t vqsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t vqsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t vqsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t vqsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t vqsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t vqsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t vqsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t vqsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t vqsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t vqsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t vqsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t vqsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t vqsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t vqsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t vqsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t vqsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t vqsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t vqsubq(uint8x16_t, uint8x16_t);
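+// vqsubq subtracts with saturation rather than wraparound; the _n forms
+// broadcast a scalar subtrahend, and the _m forms merge lanes that fail
+// the mve_pred16_t predicate from the leading inactive-value argument.
+// Sketch:
+//
+//   uint8x16_t y = vqsubq(vdupq_n_u8(10), vdupq_n_u8(200));   // clamps to 0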
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8(uint64x2_t);
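+// vreinterpretq_* changes only the static type of a 128-bit vector value:
+// no lanes are converted, and the cast typically costs no instructions.
+// Sketch:
+//
+//   uint32x4_t w = vdupq_n_u32(0x01020304u);
+//   uint8x16_t bytes = vreinterpretq_u8(w);   // same 128 bits, byte view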
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t vrev16q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t vrev16q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t vrev16q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t vrev16q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t vrev16q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t vrev16q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t vrev16q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t vrev16q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t vrev32q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t vrev32q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t vrev32q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t vrev32q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t vrev32q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t vrev32q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t vrev32q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t vrev32q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t vrev32q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t vrev32q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t vrev32q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t vrev32q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t vrev32q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t vrev32q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t vrev32q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t vrev32q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t vrev64q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t vrev64q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t vrev64q_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t vrev64q(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t vrev64q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t vrev64q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t vrev64q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t vrev64q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t vrev64q_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t vrev64q(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t vrev64q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t vrev64q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t vrev64q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t vrev64q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t vrev64q_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t vrev64q_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t vrev64q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t vrev64q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t vrev64q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t vrev64q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t vrev64q_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t vrev64q_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t vrev64q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t vrev64q_x(uint8x16_t, mve_pred16_t);
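+// The vrevNq families reverse element order within each N-bit container:
+// vrev16q swaps the bytes of every halfword, vrev32q reverses the 8- or
+// 16-bit elements of every word, and vrev64q those of every doubleword.
+// The _m variants merge unselected lanes from the extra inactive-value
+// argument, while _x variants leave them unspecified. E.g., as a byte
+// swap for 16-bit endianness conversion (`bytes` assumed to be a
+// uint8x16_t):
+//
+//   uint8x16_t swapped = vrev16q(bytes);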
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t vrhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t vrhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t vrhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t vrhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t vrhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t vrhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t vrhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t vrhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t vrhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
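+// vrhaddq is a rounding halving add: per lane it evaluates
+// (a + b + 1) >> 1 in a wider intermediate, so the sum cannot overflow.
+// Sketch:
+//
+//   uint8x16_t m = vrhaddq(vdupq_n_u8(255), vdupq_n_u8(254));   // all 255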
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t vrmlaldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t vrmlaldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t vrmlsldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t vrmlsldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t vrmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t vrmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t vrmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t vrmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t vrmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t vrmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t vrmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t vrmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t vrmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t vrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t vrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t vrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t vrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t vrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t vrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t vrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t vrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t vrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t vrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t vrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t vrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t vrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t vrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t vrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t vrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t vrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t vrshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t vrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t vrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t vrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t vrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t vrshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t vrshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t vrshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t vrshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t vrshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t vrshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t vrshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t vrshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t vrshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t vrshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t vrshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t vrshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t vsbciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t vsbciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t vsbciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t vsbcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t vsbcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t vsbcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t vshlcq_s16(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t vshlcq(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t vshlcq_s32(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t vshlcq(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t vshlcq_s8(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t vshlcq(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t vshlcq_u16(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t vshlcq(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t vshlcq_u32(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t vshlcq(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t vshlcq_u8(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t vshlcq(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t vshllbq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t vshllbq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t vshllbq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t vshllbq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t vshllbq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t vshllbq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t vshllbq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t vshllbq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t vshlltq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t vshlltq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t vshlltq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t vshlltq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t vshlltq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t vshlltq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t vshlltq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t vshlltq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t vshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t vshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t vshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t vshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t vshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t vshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t vshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t vshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t vshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t vshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t vshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t vshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t vshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t vshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t vshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t vshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t vshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t vshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t vshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t vshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t vshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t vshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t vshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t vshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t vshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t vshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t vshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t vshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t vshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t vshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t vshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t vshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t vshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t vshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t vshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t vshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t vshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t vshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t vshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t vshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t vshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t vshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t vshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t vshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t vshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t vshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t vshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t vshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t vshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t vshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t vshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t vshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t vsliq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t vsliq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t vsliq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t vsliq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t vsliq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t vsliq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t vsliq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t vsriq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t vsriq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t vsriq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t vsriq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t vsriq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t vsriq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t vsriq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq(uint32_t *, uint32x4_t);
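+/* The vstrwq family above comprises the 32-bit vector stores. Following
+ * the ACLE MVE naming scheme: "scatter_base" forms store each lane at the
+ * address held in the corresponding lane of a uint32x4_t base vector plus
+ * an immediate; "_wb" forms take that base vector by pointer and write the
+ * advanced addresses back; "scatter_offset" adds a per-lane byte offset to
+ * a scalar pointer, and "scatter_shifted_offset" first scales each offset
+ * by the 4-byte element size; "_p" forms take an mve_pred16_t and store
+ * only the active lanes. A minimal sketch, assuming an MVE-enabled target
+ * (the function and variable names are illustrative, not part of this
+ * header):
+ *
+ *   void store4(uint32_t *buf, uint32x4_t data) {
+ *       uint32x4_t off = vidupq_n_u32(0, 4);    // byte offsets 0,4,8,12
+ *       vstrwq_scatter_offset(buf, off, data);  // polymorphic overload
+ *   }
+ */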
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t vsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t vsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t vsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t vsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t vsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t vsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t vsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t vsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t vsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t vsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t vsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t vsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
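+/* Suffix conventions for the arithmetic intrinsics above, per the ACLE MVE
+ * scheme: "_n" forms take a scalar second operand that is duplicated
+ * across all lanes; "_m" (merging) forms take an extra leading vector and
+ * a trailing mve_pred16_t, and inactive lanes take their values from that
+ * leading vector; "_x" ("don't care") forms also take a predicate but
+ * leave inactive lanes unspecified. A minimal sketch, assuming an
+ * MVE-enabled target (names are illustrative):
+ *
+ *   int32x4_t partial_sub(int32x4_t a, int32x4_t b) {
+ *       mve_pred16_t p = vctp32q(2);  // predicate: first 2 lanes active
+ *       return vsubq_x(a, b, p);      // lanes 0-1 = a-b, rest unspecified
+ *   }
+ */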
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t vuninitializedq(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t vuninitializedq(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t vuninitializedq(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t vuninitializedq(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t vuninitializedq_u8();
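+/* The vuninitializedq intrinsics return a vector with unspecified
+ * contents; they let predicated "_x"-style code name a result register
+ * without creating a dependency on an initialized value. In the
+ * polymorphic overloads the argument exists only to select the result
+ * type and its value is ignored.
+ */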
+
+#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
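+/* Everything in this block is additionally guarded on the floating-point
+ * MVE extension: ACLE defines __ARM_FEATURE_MVE as a bitmask with bit 0
+ * set for integer MVE and bit 1 set for floating-point MVE, so
+ * (__ARM_FEATURE_MVE & 2) gates the f16/f32 intrinsics. A sketch of the
+ * feature test a consumer might write (illustrative, not part of this
+ * header):
+ *
+ *   #if defined __ARM_FEATURE_MVE && (__ARM_FEATURE_MVE & 2)
+ *   float32x4_t twice(float32x4_t v) { return vmulq(v, vdupq_n_f32(2.0f)); }
+ *   #endif
+ */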
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t vabdq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t vabdq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t vabdq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t vabdq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t vabsq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t vabsq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t vabsq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t vabsq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t vabsq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t vabsq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t vabsq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t vabsq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t vaddq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t vaddq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t vaddq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t vaddq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t vaddq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t vaddq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t vandq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t vandq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t vandq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t vandq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t vbicq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t vbicq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t vbicq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t vbicq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t vbrsrq_n_f16(float16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t vbrsrq(float16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t vbrsrq_n_f32(float32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t vbrsrq(float32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t vcaddq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t vcaddq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t vcaddq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t vcaddq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t vcaddq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32_t);
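+/* The vcmp* intrinsics above all return mve_pred16_t, a 16-bit mask with
+ * one bit per byte of the 128-bit vector, so each f16 lane owns 2 bits and
+ * each f32 lane owns 4. The mask feeds the "_m"/"_x" predicated forms or a
+ * lane select. A minimal sketch, assuming an MVE-FP target (names are
+ * illustrative):
+ *
+ *   float32x4_t lane_max(float32x4_t a, float32x4_t b) {
+ *       mve_pred16_t p = vcmpgtq(a, b);  // per-lane a > b
+ *       return vpselq(a, b, p);          // a where active, b elsewhere
+ *   }
+ */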
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t vcmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t vcmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t vcmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t vcmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t vcmulq_rot180(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t vcmulq_rot180(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t vcmulq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t vcmulq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t vcmulq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t vcmulq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t vcmulq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))
+int16x8_t vcvtaq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))
+int32x4_t vcvtaq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))
+uint16x8_t vcvtaq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))
+uint32x4_t vcvtaq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))
+int16x8_t vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))
+int32x4_t vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))
+uint16x8_t vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))
+uint32x4_t vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))
+float32x4_t vcvtbq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))
+float32x4_t vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))
+float32x4_t vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))
+int16x8_t vcvtmq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))
+int32x4_t vcvtmq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))
+uint16x8_t vcvtmq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))
+uint32x4_t vcvtmq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))
+int16x8_t vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))
+int32x4_t vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))
+uint16x8_t vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))
+uint32x4_t vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))
+int16x8_t vcvtnq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))
+int32x4_t vcvtnq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))
+uint16x8_t vcvtnq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))
+uint32x4_t vcvtnq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))
+int16x8_t vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))
+int32x4_t vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))
+uint16x8_t vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))
+uint32x4_t vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))
+int16x8_t vcvtpq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))
+int32x4_t vcvtpq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))
+uint16x8_t vcvtpq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))
+uint32x4_t vcvtpq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))
+int16x8_t vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))
+int32x4_t vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))
+uint16x8_t vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))
+uint32x4_t vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t vcvtq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t vcvtq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t vcvtq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t vcvtq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t vcvtq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t vcvtq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t vcvtq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t vcvtq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t vcvtq_n_f16_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t vcvtq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t vcvtq_n_f16_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t vcvtq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t vcvtq_n_f32_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t vcvtq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t vcvtq_n_f32_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t vcvtq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
+int16x8_t vcvtq_n_s16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
+int32x4_t vcvtq_n_s32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
+uint16x8_t vcvtq_n_u16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
+uint32x4_t vcvtq_n_u32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))
+int16x8_t vcvtq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))
+int32x4_t vcvtq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))
+uint16x8_t vcvtq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))
+uint32x4_t vcvtq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t vcvtq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t vcvtq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t vcvtq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t vcvtq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t vcvtq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
+int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
+int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
+uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
+uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))
+int16x8_t vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))
+int32x4_t vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))
+uint16x8_t vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))
+uint32x4_t vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))
+float32x4_t vcvttq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))
+float32x4_t vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))
+float32x4_t vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))
+float16x8_t vdupq_n_f16(float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))
+float32x4_t vdupq_n_f32(float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))
+float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))
+float32x4_t vdupq_x_n_f32(float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t veorq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t veorq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t veorq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t veorq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t vfmaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t vfmaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t vfmaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t vfmaq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t vfmaq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t vfmasq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t vfmasq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t vfmsq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t vfmsq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t vmaxnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t vmaxnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t vmaxnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t vmaxnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t vmaxnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t vmaxnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t vmaxnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t vmaxnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t vmaxnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t vmaxnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t vmaxnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t vmaxnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t vminnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t vminnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t vminnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t vminnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t vminnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t vminnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t vminnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t vminnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t vminnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t vminnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t vminnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t vminnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t vminnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t vminnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t vminnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t vminnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t vmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t vmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t vmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t vmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t vmulq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t vmulq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t vmulq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t vmulq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t vmulq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t vmulq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t vnegq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t vnegq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t vnegq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t vnegq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t vnegq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t vnegq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t vnegq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t vnegq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t vnegq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t vnegq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t vornq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t vornq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t vornq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t vornq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t vorrq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t vorrq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t vorrq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t vorrq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t vrev32q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t vrev32q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t vrev32q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t vrev32q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t vrev64q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t vrev64q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t vrev64q_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t vrev64q(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t vrev64q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t vrev64q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t vrev64q_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t vrev64q_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t vrndaq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t vrndaq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t vrndaq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t vrndaq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t vrndaq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t vrndaq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t vrndaq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t vrndaq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t vrndmq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t vrndmq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t vrndmq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t vrndmq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t vrndmq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t vrndmq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t vrndmq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t vrndmq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t vrndnq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t vrndnq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t vrndnq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t vrndnq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t vrndnq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t vrndnq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t vrndnq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t vrndnq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t vrndpq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t vrndpq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t vrndpq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t vrndpq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t vrndpq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t vrndpq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t vrndpq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t vrndpq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t vrndq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t vrndq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t vrndq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t vrndq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t vrndq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t vrndq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t vrndq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t vrndq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t vrndq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t vrndq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t vrndxq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t vrndxq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t vrndxq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t vrndxq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t vrndxq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t vrndxq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t vrndxq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t vrndxq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t vsubq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t vsubq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t vsubq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t vsubq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t vsubq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t vsubq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t vuninitializedq_f32();
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t vuninitializedq(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __ARM_MVE_H */
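
[Editor's note, illustrative only and not part of the patch: the float MVE intrinsics declared above follow a fixed naming grammar. Each operation has a type-suffixed form (vmulq_f32) and a polymorphic form (vmulq) that dispatches on argument types via __overloadable__; _m variants take an extra "inactive" vector plus a predicate, _x variants leave inactive lanes undefined, and _p variants predicate stores. A minimal tail-predicated loop sketch follows, assuming vctp32q and vld1q_z_f32, which live in sections of arm_mve.h not shown in this hunk:]

#include <arm_mve.h>

/* Scale n floats by k using tail predication, so the final partial
 * vector needs no scalar epilogue. vctp32q(n - i) builds a predicate
 * covering min(n - i, 4) lanes; vld1q_z_f32 zeroes inactive lanes on
 * load, and vst1q_p_f32 suppresses inactive lanes on store. */
void scale_f32(float32_t *dst, const float32_t *src, float32_t k, uint32_t n)
{
    for (uint32_t i = 0; i < n; i += 4) {
        mve_pred16_t p = vctp32q(n - i);
        float32x4_t v = vld1q_z_f32(&src[i], p);
        v = vmulq_x_n_f32(v, k, p);  /* _x form declared above: inactive
                                        lanes come back undefined */
        vst1q_p_f32(&dst[i], v, p);  /* predicated store, declared above */
    }
}
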
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_neon.h b/darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
similarity index 80%
rename from darwin-x86/lib64/clang/11.0.1/include/arm_neon.h
rename to darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
index a8ab92f..da1e17c 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/arm_neon.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
@@ -24,12 +24,21 @@
 #ifndef __ARM_NEON_H
 #define __ARM_NEON_H
 
+#ifndef __ARM_FP
+#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
+#else
+
 #if !defined(__ARM_NEON)
 #error "NEON support not enabled"
-#endif
+#else
 
 #include <stdint.h>
 
+#ifdef __ARM_FEATURE_BF16
+#include <arm_bf16.h>
+typedef __bf16 bfloat16_t;
+#endif
+
 typedef float float32_t;
 typedef __fp16 float16_t;
 #ifdef __aarch64__
@@ -44,6 +53,7 @@
 #else
 typedef int8_t poly8_t;
 typedef int16_t poly16_t;
+typedef int64_t poly64_t;
 #endif
 typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
 typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
@@ -73,10 +83,8 @@
 typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
 typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
 typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-#ifdef __aarch64__
 typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
 typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-#endif
 
 typedef struct int8x8x2_t {
   int8x8_t val[2];
@@ -184,7 +192,6 @@
   poly16x8_t val[2];
 } poly16x8x2_t;
 
-#ifdef __aarch64__
 typedef struct poly64x1x2_t {
   poly64x1_t val[2];
 } poly64x1x2_t;
@@ -193,7 +200,6 @@
   poly64x2_t val[2];
 } poly64x2x2_t;
 
-#endif
 typedef struct int8x8x3_t {
   int8x8_t val[3];
 } int8x8x3_t;
@@ -300,7 +306,6 @@
   poly16x8_t val[3];
 } poly16x8x3_t;
 
-#ifdef __aarch64__
 typedef struct poly64x1x3_t {
   poly64x1_t val[3];
 } poly64x1x3_t;
@@ -309,7 +314,6 @@
   poly64x2_t val[3];
 } poly64x2x3_t;
 
-#endif
 typedef struct int8x8x4_t {
   int8x8_t val[4];
 } int8x8x4_t;
@@ -416,7 +420,6 @@
   poly16x8_t val[4];
 } poly16x8x4_t;
 
-#ifdef __aarch64__
 typedef struct poly64x1x4_t {
   poly64x1_t val[4];
 } poly64x1x4_t;
@@ -425,11 +428,1303 @@
   poly64x2_t val[4];
 } poly64x2x4_t;
 
+#ifdef __ARM_FEATURE_BF16
+typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t;
+typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t;
+
+typedef struct bfloat16x4x2_t {
+  bfloat16x4_t val[2];
+} bfloat16x4x2_t;
+
+typedef struct bfloat16x8x2_t {
+  bfloat16x8_t val[2];
+} bfloat16x8x2_t;
+
+typedef struct bfloat16x4x3_t {
+  bfloat16x4_t val[3];
+} bfloat16x4x3_t;
+
+typedef struct bfloat16x8x3_t {
+  bfloat16x8_t val[3];
+} bfloat16x8x3_t;
+
+typedef struct bfloat16x4x4_t {
+  bfloat16x4_t val[4];
+} bfloat16x4x4_t;
+
+typedef struct bfloat16x8x4_t {
+  bfloat16x8_t val[4];
+} bfloat16x8x4_t;
+
 #endif
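
[Editor's note, illustrative only: the bfloat16 vector and array-of-vector typedefs above mirror the layout used for every other NEON element type. bfloat16_t is a storage-only format, so typical use is load, store, and reinterpret rather than direct arithmetic. A minimal round-trip sketch, assuming vld1q_bf16 and vst1q_bf16 from the BF16 section of this header:]

#if defined(__ARM_FEATURE_BF16)
/* Copy eight bfloat16 values through a bfloat16x8_t register. */
void copy_bf16x8(bfloat16_t *dst, const bfloat16_t *src)
{
    bfloat16x8_t v = vld1q_bf16(src);
    vst1q_bf16(dst, v);
}
#endif
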
 
 #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
 
 #ifdef __LITTLE_ENDIAN__
+#define splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#else
+#define splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#endif
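
[Editor's note, illustrative only: each splat macro above is defined twice. On big-endian targets clang keeps vector lanes in reversed register order relative to the architectural lane numbering the intrinsics use, so the big-endian variant reverses the input with __builtin_shufflevector, invokes the builtin, and reverses the result; the __noswap_ forms exist for internal composition where operands are already in reversed order. Single-element vectors need no swap, which is why splat_lane_p64 just below has only one definition. The reversal idiom in isolation, as a sketch:]

/* Lane i of the result is lane (7 - i) of the input; this is exactly
 * the shuffle the big-endian macro variants wrap around each builtin. */
static __inline__ poly8x8_t __reverse_p8_example(poly8x8_t __v) {
  return __builtin_shufflevector(__v, __v, 7, 6, 5, 4, 3, 2, 1, 0);
}
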
+
+#define splat_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#else
+#define splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#else
+#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret; \
+})
+#else
+#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#else
+#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#else
+#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#else
+#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#else
+#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret; \
+})
+#else
+#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#else
+#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#else
+#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#else
+#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#else
+#define splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#endif
+
+#define splat_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
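
splat_lane_p64 above and the u64/f64/s64 definitions like this one sit outside the endian guard: a one-lane vector admits only one lane order, so the reverse/re-reverse dance degenerates to the identity and a single definition (with no __noswap_ twin) serves both byte orders. The degenerate shuffle, for illustration:

typedef long long int64x1 __attribute__((vector_size(8)));

static inline int64x1 rev_one_lane(int64x1 v) {
  return __builtin_shufflevector(v, v, 0);  /* identity: nothing to reverse */
}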
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#else
+#define splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#endif
+
+#define splat_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#else
+#define splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#else
+#define splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#endif
+
+#define splat_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#endif
+
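The _laneq family that follows repeats the set with a 128-bit source: the q in laneq names the source width (hence the int8x16_t casts and the higher type codes in the builtin's trailing argument), while splat vs. splatq still picks the result width. These remain internal helpers; user code reaches them through the vdup_laneq_*-style intrinsics, rewritten in the same fashion as the vdup_lane hunk further down. A usage sketch (assumes an AArch64 target):

#include <arm_neon.h>

/* 64-bit result from lane 3 of a 128-bit source; on AArch64 this
 * typically lowers to a single dup instruction. */
uint32x2_t take_lane3(uint32x4_t q) {
  return vdup_laneq_u32(q, 3);
}
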
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#else
+#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#else
+#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
+  __ret; \
+})
+#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#else
+#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#else
+#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#else
+#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#else
+#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#else
+#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#else
+#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#else
+#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#else
+#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#else
+#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#else
+#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
+  __ret; \
+})
+#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#else
+#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#else
+#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
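
The hunk below is the consumer side of the new helpers: each vdup_lane_* body stops open-coding __builtin_shufflevector broadcasts and instead delegates to the matching splat_lane_* (directly on little-endian; via reverse, __noswap_*, reverse on big-endian). Note the numeric suffixes on the locals (__p0_0, __ret_1, ...): the splat helpers are themselves statement-expression macros that declare __s0 and __ret, so an unsuffixed caller would nest two scopes reusing the same names and trip the classic self-initialization capture. A reduced illustration of the bug the per-expansion numbering avoids (hypothetical macros, not from this header):

#define INNER(x) __extension__ ({ int __t = (x); __t * 2; })
#define OUTER(x) __extension__ ({ int __t = (x); INNER(__t); })
/* In OUTER(5), INNER expands to `int __t = (__t);` -- the inner
 * declaration is already in scope inside its own initializer, so it
 * reads an indeterminate value instead of OUTER's __t.  Giving each
 * expansion unique locals (__t_0, __t_1, ...) sidesteps this, which
 * is exactly what the generator does below. */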
@@ -4464,372 +5759,372 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
+  poly8x8_t __s0_0 = __p0_0; \
+  poly8x8_t __ret_0; \
+  __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
+  __ret_0; \
 })
 #else
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
+  poly8x8_t __s0_1 = __p0_1; \
+  poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_1; \
+  __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
+  __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_1; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
+  poly16x4_t __s0_2 = __p0_2; \
+  poly16x4_t __ret_2; \
+  __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
+  __ret_2; \
 })
 #else
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
+  poly16x4_t __s0_3 = __p0_3; \
+  poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
+  poly16x4_t __ret_3; \
+  __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
+  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
+  __ret_3; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
+  poly8x8_t __s0_4 = __p0_4; \
+  poly8x16_t __ret_4; \
+  __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
+  __ret_4; \
 })
 #else
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
+  poly8x8_t __s0_5 = __p0_5; \
+  poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_5; \
+  __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
+  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_5; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
+  poly16x4_t __s0_6 = __p0_6; \
+  poly16x8_t __ret_6; \
+  __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
+  __ret_6; \
 })
 #else
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
+  poly16x4_t __s0_7 = __p0_7; \
+  poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
+  poly16x8_t __ret_7; \
+  __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
+  __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_7; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
+  uint8x8_t __s0_8 = __p0_8; \
+  uint8x16_t __ret_8; \
+  __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
+  __ret_8; \
 })
 #else
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
+  uint8x8_t __s0_9 = __p0_9; \
+  uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_9; \
+  __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
+  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_9; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
+  uint32x2_t __s0_10 = __p0_10; \
+  uint32x4_t __ret_10; \
+  __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
+  __ret_10; \
 })
 #else
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
+  uint32x2_t __s0_11 = __p0_11; \
+  uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
+  uint32x4_t __ret_11; \
+  __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
+  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
+  __ret_11; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
+  uint64x1_t __s0_12 = __p0_12; \
+  uint64x2_t __ret_12; \
+  __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
+  __ret_12; \
 })
 #else
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
+  uint64x1_t __s0_13 = __p0_13; \
+  uint64x2_t __ret_13; \
+  __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
+  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
+  __ret_13; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
+  uint16x4_t __s0_14 = __p0_14; \
+  uint16x8_t __ret_14; \
+  __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
+  __ret_14; \
 })
 #else
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
+  uint16x4_t __s0_15 = __p0_15; \
+  uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
+  uint16x8_t __ret_15; \
+  __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
+  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_15; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
+  int8x8_t __s0_16 = __p0_16; \
+  int8x16_t __ret_16; \
+  __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
+  __ret_16; \
 })
 #else
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
+  int8x8_t __s0_17 = __p0_17; \
+  int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_17; \
+  __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
+  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_17; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
+  float32x2_t __s0_18 = __p0_18; \
+  float32x4_t __ret_18; \
+  __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
+  __ret_18; \
 })
 #else
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
+  float32x2_t __s0_19 = __p0_19; \
+  float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
+  float32x4_t __ret_19; \
+  __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
+  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
+  __ret_19; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
+  int32x2_t __s0_20 = __p0_20; \
+  int32x4_t __ret_20; \
+  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
+  __ret_20; \
 })
 #else
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
+  int32x2_t __s0_21 = __p0_21; \
+  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
+  int32x4_t __ret_21; \
+  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
+  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
+  __ret_21; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
+  int64x1_t __s0_22 = __p0_22; \
+  int64x2_t __ret_22; \
+  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
+  __ret_22; \
 })
 #else
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
+  int64x1_t __s0_23 = __p0_23; \
+  int64x2_t __ret_23; \
+  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
+  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
+  __ret_23; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
+  int16x4_t __s0_24 = __p0_24; \
+  int16x8_t __ret_24; \
+  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
+  __ret_24; \
 })
 #else
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
+  int16x4_t __s0_25 = __p0_25; \
+  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
+  int16x8_t __ret_25; \
+  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
+  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_25; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
+  uint8x8_t __s0_26 = __p0_26; \
+  uint8x8_t __ret_26; \
+  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
+  __ret_26; \
 })
 #else
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
+  uint8x8_t __s0_27 = __p0_27; \
+  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_27; \
+  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
+  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_27; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
+  uint32x2_t __s0_28 = __p0_28; \
+  uint32x2_t __ret_28; \
+  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
+  __ret_28; \
 })
 #else
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
+  uint32x2_t __s0_29 = __p0_29; \
+  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
+  uint32x2_t __ret_29; \
+  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
+  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
+  __ret_29; \
 })
 #endif
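
The one-lane u64/s64 forms that follow sit outside any __LITTLE_ENDIAN__ guard: a uint64x1_t has a single element, so there is no lane order to reverse and no __noswap_ variant to call, and the macro delegates to splat_lane_u64 (or splat_lane_s64) unconditionally. A small sketch under the same toolchain assumption:

/* For a one-element vector, lane 0 is the only valid lane index. */
#include <arm_neon.h>
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64x1_t v = vdup_n_u64(0xdeadbeefULL); /* one-lane vector */
  uint64x1_t d = vdup_lane_u64(v, 0);       /* identity splat of lane 0 */
  printf("0x%" PRIx64 "\n", vget_lane_u64(d, 0));
  return 0;
}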
 
-#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
+  uint64x1_t __s0_30 = __p0_30; \
+  uint64x1_t __ret_30; \
+  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
+  __ret_30; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
+  uint16x4_t __s0_31 = __p0_31; \
+  uint16x4_t __ret_31; \
+  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
+  __ret_31; \
 })
 #else
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
+  uint16x4_t __s0_32 = __p0_32; \
+  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
+  uint16x4_t __ret_32; \
+  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
+  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
+  __ret_32; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
+  int8x8_t __s0_33 = __p0_33; \
+  int8x8_t __ret_33; \
+  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
+  __ret_33; \
 })
 #else
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
+  int8x8_t __s0_34 = __p0_34; \
+  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_34; \
+  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
+  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_34; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
+  float32x2_t __s0_35 = __p0_35; \
+  float32x2_t __ret_35; \
+  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
+  __ret_35; \
 })
 #else
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
+  float32x2_t __s0_36 = __p0_36; \
+  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
+  float32x2_t __ret_36; \
+  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
+  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
+  __ret_36; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
+  int32x2_t __s0_37 = __p0_37; \
+  int32x2_t __ret_37; \
+  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
+  __ret_37; \
 })
 #else
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
+  int32x2_t __s0_38 = __p0_38; \
+  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
+  int32x2_t __ret_38; \
+  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
+  __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
+  __ret_38; \
 })
 #endif
 
-#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
+  int64x1_t __s0_39 = __p0_39; \
+  int64x1_t __ret_39; \
+  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
+  __ret_39; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
+  int16x4_t __s0_40 = __p0_40; \
+  int16x4_t __ret_40; \
+  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
+  __ret_40; \
 })
 #else
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
+  int16x4_t __s0_41 = __p0_41; \
+  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
+  int16x4_t __ret_41; \
+  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
+  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
+  __ret_41; \
 })
 #endif
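
The next hunk applies the same rewrite to the multiply-accumulate family: vmla(q)_lane_* computes, per lane, accumulator + operand * vector[lane], and the lane broadcast now goes through splat(q)_lane_* (or the __noswap_ variants on big-endian) instead of a repeated-index __builtin_shufflevector. A usage sketch under the same assumptions as above:

/* vmlaq_lane_u32(acc, x, v, lane) == acc + x * v[lane], element-wise. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  uint32_t a[4] = {1, 1, 1, 1};
  uint32_t b[4] = {2, 3, 4, 5};
  uint32_t c[2] = {10, 100};
  uint32x4_t acc = vld1q_u32(a);
  uint32x4_t x   = vld1q_u32(b);
  uint32x2_t v   = vld1_u32(c);
  uint32x4_t r = vmlaq_lane_u32(acc, x, v, 0); /* {21, 31, 41, 51} */
  uint32_t out[4];
  vst1q_u32(out, r);
  printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);
  return 0;
}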
 
@@ -13187,242 +14482,242 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
+  uint32x4_t __s0_42 = __p0_42; \
+  uint32x4_t __s1_42 = __p1_42; \
+  uint32x2_t __s2_42 = __p2_42; \
+  uint32x4_t __ret_42; \
+  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
+  __ret_42; \
 })
 #else
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
+  uint32x4_t __s0_43 = __p0_43; \
+  uint32x4_t __s1_43 = __p1_43; \
+  uint32x2_t __s2_43 = __p2_43; \
+  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
+  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
+  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
+  uint32x4_t __ret_43; \
+  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
+  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
+  __ret_43; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
+  uint16x8_t __s0_44 = __p0_44; \
+  uint16x8_t __s1_44 = __p1_44; \
+  uint16x4_t __s2_44 = __p2_44; \
+  uint16x8_t __ret_44; \
+  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
+  __ret_44; \
 })
 #else
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
+  uint16x8_t __s0_45 = __p0_45; \
+  uint16x8_t __s1_45 = __p1_45; \
+  uint16x4_t __s2_45 = __p2_45; \
+  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
+  uint16x8_t __ret_45; \
+  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
+  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_45; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
+  float32x4_t __s0_46 = __p0_46; \
+  float32x4_t __s1_46 = __p1_46; \
+  float32x2_t __s2_46 = __p2_46; \
+  float32x4_t __ret_46; \
+  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
+  __ret_46; \
 })
 #else
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
+  float32x4_t __s0_47 = __p0_47; \
+  float32x4_t __s1_47 = __p1_47; \
+  float32x2_t __s2_47 = __p2_47; \
+  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
+  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
+  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
+  float32x4_t __ret_47; \
+  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
+  __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
+  __ret_47; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
+  int32x4_t __s0_48 = __p0_48; \
+  int32x4_t __s1_48 = __p1_48; \
+  int32x2_t __s2_48 = __p2_48; \
+  int32x4_t __ret_48; \
+  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
+  __ret_48; \
 })
 #else
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
+  int32x4_t __s0_49 = __p0_49; \
+  int32x4_t __s1_49 = __p1_49; \
+  int32x2_t __s2_49 = __p2_49; \
+  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
+  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
+  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
+  int32x4_t __ret_49; \
+  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
+  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
+  __ret_49; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
+  int16x8_t __s0_50 = __p0_50; \
+  int16x8_t __s1_50 = __p1_50; \
+  int16x4_t __s2_50 = __p2_50; \
+  int16x8_t __ret_50; \
+  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
+  __ret_50; \
 })
 #else
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
+  int16x8_t __s0_51 = __p0_51; \
+  int16x8_t __s1_51 = __p1_51; \
+  int16x4_t __s2_51 = __p2_51; \
+  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
+  int16x8_t __ret_51; \
+  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
+  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_51; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
+  uint32x2_t __s0_52 = __p0_52; \
+  uint32x2_t __s1_52 = __p1_52; \
+  uint32x2_t __s2_52 = __p2_52; \
+  uint32x2_t __ret_52; \
+  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
+  __ret_52; \
 })
 #else
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
+  uint32x2_t __s0_53 = __p0_53; \
+  uint32x2_t __s1_53 = __p1_53; \
+  uint32x2_t __s2_53 = __p2_53; \
+  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
+  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
+  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
+  uint32x2_t __ret_53; \
+  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
+  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
+  __ret_53; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
+  uint16x4_t __s0_54 = __p0_54; \
+  uint16x4_t __s1_54 = __p1_54; \
+  uint16x4_t __s2_54 = __p2_54; \
+  uint16x4_t __ret_54; \
+  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
+  __ret_54; \
 })
 #else
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
+  uint16x4_t __s0_55 = __p0_55; \
+  uint16x4_t __s1_55 = __p1_55; \
+  uint16x4_t __s2_55 = __p2_55; \
+  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
+  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
+  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
+  uint16x4_t __ret_55; \
+  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
+  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
+  __ret_55; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
+  float32x2_t __s0_56 = __p0_56; \
+  float32x2_t __s1_56 = __p1_56; \
+  float32x2_t __s2_56 = __p2_56; \
+  float32x2_t __ret_56; \
+  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
+  __ret_56; \
 })
 #else
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
+  float32x2_t __s0_57 = __p0_57; \
+  float32x2_t __s1_57 = __p1_57; \
+  float32x2_t __s2_57 = __p2_57; \
+  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
+  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
+  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
+  float32x2_t __ret_57; \
+  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
+  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
+  __ret_57; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
+  int32x2_t __s0_58 = __p0_58; \
+  int32x2_t __s1_58 = __p1_58; \
+  int32x2_t __s2_58 = __p2_58; \
+  int32x2_t __ret_58; \
+  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
+  __ret_58; \
 })
 #else
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
+  int32x2_t __s0_59 = __p0_59; \
+  int32x2_t __s1_59 = __p1_59; \
+  int32x2_t __s2_59 = __p2_59; \
+  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
+  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
+  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
+  int32x2_t __ret_59; \
+  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
+  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
+  __ret_59; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
+  int16x4_t __s0_60 = __p0_60; \
+  int16x4_t __s1_60 = __p1_60; \
+  int16x4_t __s2_60 = __p2_60; \
+  int16x4_t __ret_60; \
+  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
+  __ret_60; \
 })
 #else
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
+  int16x4_t __s0_61 = __p0_61; \
+  int16x4_t __s1_61 = __p1_61; \
+  int16x4_t __s2_61 = __p2_61; \
+  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
+  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
+  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
+  int16x4_t __ret_61; \
+  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
+  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
+  __ret_61; \
 })
 #endif
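
The multiply-subtract family below is the mirror image: vmls(q)_lane_* computes accumulator - operand * vector[lane], with the identical splat-helper substitution. A short sketch under the same assumptions:

/* vmls_lane_f32(a, b, v, lane) == a - b * v[lane], element-wise. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float32x2_t a = vdup_n_f32(10.0f);
  float bs[2] = {2.0f, 3.0f};
  float32x2_t b = vld1_f32(bs);
  float32x2_t v = vdup_n_f32(5.0f);
  float32x2_t r = vmls_lane_f32(a, b, v, 0); /* {10-2*5, 10-3*5} = {0, -5} */
  printf("%f %f\n", vget_lane_f32(r, 0), vget_lane_f32(r, 1));
  return 0;
}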
 
@@ -13849,242 +15144,242 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
+  uint32x4_t __s0_62 = __p0_62; \
+  uint32x4_t __s1_62 = __p1_62; \
+  uint32x2_t __s2_62 = __p2_62; \
+  uint32x4_t __ret_62; \
+  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
+  __ret_62; \
 })
 #else
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
+  uint32x4_t __s0_63 = __p0_63; \
+  uint32x4_t __s1_63 = __p1_63; \
+  uint32x2_t __s2_63 = __p2_63; \
+  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
+  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
+  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
+  uint32x4_t __ret_63; \
+  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
+  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
+  __ret_63; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
+  uint16x8_t __s0_64 = __p0_64; \
+  uint16x8_t __s1_64 = __p1_64; \
+  uint16x4_t __s2_64 = __p2_64; \
+  uint16x8_t __ret_64; \
+  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
+  __ret_64; \
 })
 #else
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
+  uint16x8_t __s0_65 = __p0_65; \
+  uint16x8_t __s1_65 = __p1_65; \
+  uint16x4_t __s2_65 = __p2_65; \
+  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
+  uint16x8_t __ret_65; \
+  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
+  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_65; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
+  float32x4_t __s0_66 = __p0_66; \
+  float32x4_t __s1_66 = __p1_66; \
+  float32x2_t __s2_66 = __p2_66; \
+  float32x4_t __ret_66; \
+  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
+  __ret_66; \
 })
 #else
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
+  float32x4_t __s0_67 = __p0_67; \
+  float32x4_t __s1_67 = __p1_67; \
+  float32x2_t __s2_67 = __p2_67; \
+  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
+  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
+  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
+  float32x4_t __ret_67; \
+  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
+  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
+  __ret_67; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
+  int32x4_t __s0_68 = __p0_68; \
+  int32x4_t __s1_68 = __p1_68; \
+  int32x2_t __s2_68 = __p2_68; \
+  int32x4_t __ret_68; \
+  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
+  __ret_68; \
 })
 #else
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
+  int32x4_t __s0_69 = __p0_69; \
+  int32x4_t __s1_69 = __p1_69; \
+  int32x2_t __s2_69 = __p2_69; \
+  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
+  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
+  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
+  int32x4_t __ret_69; \
+  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
+  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
+  __ret_69; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
+  int16x8_t __s0_70 = __p0_70; \
+  int16x8_t __s1_70 = __p1_70; \
+  int16x4_t __s2_70 = __p2_70; \
+  int16x8_t __ret_70; \
+  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
+  __ret_70; \
 })
 #else
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
+  int16x8_t __s0_71 = __p0_71; \
+  int16x8_t __s1_71 = __p1_71; \
+  int16x4_t __s2_71 = __p2_71; \
+  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
+  int16x8_t __ret_71; \
+  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
+  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_71; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
+  uint32x2_t __s0_72 = __p0_72; \
+  uint32x2_t __s1_72 = __p1_72; \
+  uint32x2_t __s2_72 = __p2_72; \
+  uint32x2_t __ret_72; \
+  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
+  __ret_72; \
 })
 #else
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
+  uint32x2_t __s0_73 = __p0_73; \
+  uint32x2_t __s1_73 = __p1_73; \
+  uint32x2_t __s2_73 = __p2_73; \
+  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
+  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
+  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
+  uint32x2_t __ret_73; \
+  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
+  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
+  __ret_73; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
+  uint16x4_t __s0_74 = __p0_74; \
+  uint16x4_t __s1_74 = __p1_74; \
+  uint16x4_t __s2_74 = __p2_74; \
+  uint16x4_t __ret_74; \
+  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
+  __ret_74; \
 })
 #else
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
+  uint16x4_t __s0_75 = __p0_75; \
+  uint16x4_t __s1_75 = __p1_75; \
+  uint16x4_t __s2_75 = __p2_75; \
+  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
+  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
+  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
+  uint16x4_t __ret_75; \
+  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
+  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
+  __ret_75; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
+  float32x2_t __s0_76 = __p0_76; \
+  float32x2_t __s1_76 = __p1_76; \
+  float32x2_t __s2_76 = __p2_76; \
+  float32x2_t __ret_76; \
+  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
+  __ret_76; \
 })
 #else
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
+  float32x2_t __s0_77 = __p0_77; \
+  float32x2_t __s1_77 = __p1_77; \
+  float32x2_t __s2_77 = __p2_77; \
+  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
+  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
+  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
+  float32x2_t __ret_77; \
+  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
+  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
+  __ret_77; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
+  int32x2_t __s0_78 = __p0_78; \
+  int32x2_t __s1_78 = __p1_78; \
+  int32x2_t __s2_78 = __p2_78; \
+  int32x2_t __ret_78; \
+  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
+  __ret_78; \
 })
 #else
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
+  int32x2_t __s0_79 = __p0_79; \
+  int32x2_t __s1_79 = __p1_79; \
+  int32x2_t __s2_79 = __p2_79; \
+  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
+  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
+  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
+  int32x2_t __ret_79; \
+  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
+  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
+  __ret_79; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
+  int16x4_t __s0_80 = __p0_80; \
+  int16x4_t __s1_80 = __p1_80; \
+  int16x4_t __s2_80 = __p2_80; \
+  int16x4_t __ret_80; \
+  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
+  __ret_80; \
 })
 #else
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
+  int16x4_t __s0_81 = __p0_81; \
+  int16x4_t __s1_81 = __p1_81; \
+  int16x4_t __s2_81 = __p2_81; \
+  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
+  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
+  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
+  int16x4_t __ret_81; \
+  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
+  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
+  __ret_81; \
 })
 #endif
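
The final hunk in this section covers the plain lane multiplies: vmul(q)_lane_* computes operand * vector[lane] per lane, again via splat(q)_lane_* rather than an inline shuffle. Sketch under the same assumptions:

/* vmulq_lane_f32(x, v, lane) == x * v[lane], element-wise. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float xs[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float32x4_t x = vld1q_f32(xs);
  float32x2_t v = vdup_n_f32(0.5f);
  float32x4_t r = vmulq_lane_f32(x, v, 1); /* {0.5, 1.0, 1.5, 2.0} */
  float out[4];
  vst1q_f32(out, r);
  printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
  return 0;
}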
 
@@ -15127,212 +16422,212 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
+  uint32x4_t __s0_82 = __p0_82; \
+  uint32x2_t __s1_82 = __p1_82; \
+  uint32x4_t __ret_82; \
+  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
+  __ret_82; \
 })
 #else
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
+  uint32x4_t __s0_83 = __p0_83; \
+  uint32x2_t __s1_83 = __p1_83; \
+  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
+  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
+  uint32x4_t __ret_83; \
+  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
+  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
+  __ret_83; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
+  uint16x8_t __s0_84 = __p0_84; \
+  uint16x4_t __s1_84 = __p1_84; \
+  uint16x8_t __ret_84; \
+  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
+  __ret_84; \
 })
 #else
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
+  uint16x8_t __s0_85 = __p0_85; \
+  uint16x4_t __s1_85 = __p1_85; \
+  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
+  uint16x8_t __ret_85; \
+  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
+  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_85; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
+  float32x4_t __s0_86 = __p0_86; \
+  float32x2_t __s1_86 = __p1_86; \
+  float32x4_t __ret_86; \
+  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
+  __ret_86; \
 })
 #else
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
+  float32x4_t __s0_87 = __p0_87; \
+  float32x2_t __s1_87 = __p1_87; \
+  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
+  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
+  float32x4_t __ret_87; \
+  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
+  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
+  __ret_87; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
+  int32x4_t __s0_88 = __p0_88; \
+  int32x2_t __s1_88 = __p1_88; \
+  int32x4_t __ret_88; \
+  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
+  __ret_88; \
 })
 #else
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
+  int32x4_t __s0_89 = __p0_89; \
+  int32x2_t __s1_89 = __p1_89; \
+  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
+  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
+  int32x4_t __ret_89; \
+  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
+  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
+  __ret_89; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
+  int16x8_t __s0_90 = __p0_90; \
+  int16x4_t __s1_90 = __p1_90; \
+  int16x8_t __ret_90; \
+  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
+  __ret_90; \
 })
 #else
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
+  int16x8_t __s0_91 = __p0_91; \
+  int16x4_t __s1_91 = __p1_91; \
+  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
+  int16x8_t __ret_91; \
+  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
+  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_91; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
+  uint32x2_t __s0_92 = __p0_92; \
+  uint32x2_t __s1_92 = __p1_92; \
+  uint32x2_t __ret_92; \
+  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
+  __ret_92; \
 })
 #else
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
+  uint32x2_t __s0_93 = __p0_93; \
+  uint32x2_t __s1_93 = __p1_93; \
+  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
+  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
+  uint32x2_t __ret_93; \
+  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
+  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
+  __ret_93; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
+  uint16x4_t __s0_94 = __p0_94; \
+  uint16x4_t __s1_94 = __p1_94; \
+  uint16x4_t __ret_94; \
+  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
+  __ret_94; \
 })
 #else
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
+  uint16x4_t __s0_95 = __p0_95; \
+  uint16x4_t __s1_95 = __p1_95; \
+  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
+  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
+  uint16x4_t __ret_95; \
+  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
+  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
+  __ret_95; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
+  float32x2_t __s0_96 = __p0_96; \
+  float32x2_t __s1_96 = __p1_96; \
+  float32x2_t __ret_96; \
+  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
+  __ret_96; \
 })
 #else
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
+  float32x2_t __s0_97 = __p0_97; \
+  float32x2_t __s1_97 = __p1_97; \
+  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
+  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
+  float32x2_t __ret_97; \
+  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
+  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
+  __ret_97; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
+  int32x2_t __s0_98 = __p0_98; \
+  int32x2_t __s1_98 = __p1_98; \
+  int32x2_t __ret_98; \
+  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
+  __ret_98; \
 })
 #else
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
+  int32x2_t __s0_99 = __p0_99; \
+  int32x2_t __s1_99 = __p1_99; \
+  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
+  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
+  int32x2_t __ret_99; \
+  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
+  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
+  __ret_99; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
+  int16x4_t __s0_100 = __p0_100; \
+  int16x4_t __s1_100 = __p1_100; \
+  int16x4_t __ret_100; \
+  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
+  __ret_100; \
 })
 #else
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
+  int16x4_t __s0_101 = __p0_101; \
+  int16x4_t __s1_101 = __p1_101; \
+  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
+  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
+  int16x4_t __ret_101; \
+  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
+  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
+  __ret_101; \
 })
 #endif
 
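Context for the hunks above and below: the regenerated header replaces the open-coded `__builtin_shufflevector(__s1, __s1, __p2, ...)` lane broadcasts with named `splat[q]_lane_<type>` helpers (with `__noswap_` variants on big-endian), and gives every macro-local a unique numeric suffix (`_91`, `_92`, ...) so that temporaries from nested intrinsic macros cannot shadow one another. The observable semantics of `vmul_lane_s16` are unchanged: every lane of the first vector is multiplied by one selected lane of the second. A minimal usage sketch, assuming an ARM target with NEON enabled (this program is not part of the header):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int16_t a_data[4] = {1, 2, 3, 4};
    int16_t v_data[4] = {10, 20, 30, 40};
    int16x4_t a = vld1_s16(a_data);
    int16x4_t v = vld1_s16(v_data);
    /* Multiply every lane of a by lane 1 of v, i.e. by 20. */
    int16x4_t r = vmul_lane_s16(a, v, 1);
    int16_t out[4];
    vst1_s16(out, r);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 20 40 60 80 */
    return 0;
}
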
@@ -15651,86 +16946,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
+  uint32x2_t __s0_102 = __p0_102; \
+  uint32x2_t __s1_102 = __p1_102; \
+  uint64x2_t __ret_102; \
+  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
+  __ret_102; \
 })
 #else
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
+  uint32x2_t __s0_103 = __p0_103; \
+  uint32x2_t __s1_103 = __p1_103; \
+  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
+  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
+  uint64x2_t __ret_103; \
+  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
+  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
+  __ret_103; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
+  uint16x4_t __s0_104 = __p0_104; \
+  uint16x4_t __s1_104 = __p1_104; \
+  uint32x4_t __ret_104; \
+  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
+  __ret_104; \
 })
 #else
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
+  uint16x4_t __s0_105 = __p0_105; \
+  uint16x4_t __s1_105 = __p1_105; \
+  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
+  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
+  uint32x4_t __ret_105; \
+  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
+  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
+  __ret_105; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
+  int32x2_t __s0_106 = __p0_106; \
+  int32x2_t __s1_106 = __p1_106; \
+  int64x2_t __ret_106; \
+  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
+  __ret_106; \
 })
 #else
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
+  int32x2_t __s0_107 = __p0_107; \
+  int32x2_t __s1_107 = __p1_107; \
+  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
+  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
+  int64x2_t __ret_107; \
+  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
+  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
+  __ret_107; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
+  int16x4_t __s0_108 = __p0_108; \
+  int16x4_t __s1_108 = __p1_108; \
+  int32x4_t __ret_108; \
+  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
+  __ret_108; \
 })
 #else
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
+  int16x4_t __s0_109 = __p0_109; \
+  int16x4_t __s1_109 = __p1_109; \
+  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
+  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
+  int32x4_t __ret_109; \
+  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
+  __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
+  __ret_109; \
 })
 #endif
 
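The `vmull_lane_*` family in this hunk performs a widening multiply: each element of the first operand is multiplied by the selected lane of the second, and the products come back at twice the element width (u16->u32, u32->u64, s16->s32, s32->s64), so they cannot wrap. A usage sketch, again assuming a NEON-enabled target:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int32_t a_data[2] = {100000, -3};
    int32_t v_data[2] = {7, 50000};
    int32x2_t a = vld1_s32(a_data);
    int32x2_t v = vld1_s32(v_data);
    /* Widening multiply: each s32 lane of a times lane 1 of v (50000),
     * producing s64 lanes so the products cannot overflow. */
    int64x2_t r = vmull_lane_s32(a, v, 1);
    printf("%lld %lld\n",
           (long long)vgetq_lane_s64(r, 0),   /* 5000000000 */
           (long long)vgetq_lane_s64(r, 1));  /* -150000 */
    return 0;
}
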
@@ -17824,50 +19119,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
+  int64x2_t __s0_110 = __p0_110; \
+  int32x2_t __s1_110 = __p1_110; \
+  int32x2_t __s2_110 = __p2_110; \
+  int64x2_t __ret_110; \
+  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
+  __ret_110; \
 })
 #else
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
+  int64x2_t __s0_111 = __p0_111; \
+  int32x2_t __s1_111 = __p1_111; \
+  int32x2_t __s2_111 = __p2_111; \
+  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
+  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
+  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
+  int64x2_t __ret_111; \
+  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
+  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
+  __ret_111; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
+  int32x4_t __s0_112 = __p0_112; \
+  int16x4_t __s1_112 = __p1_112; \
+  int16x4_t __s2_112 = __p2_112; \
+  int32x4_t __ret_112; \
+  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
+  __ret_112; \
 })
 #else
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
+  int32x4_t __s0_113 = __p0_113; \
+  int16x4_t __s1_113 = __p1_113; \
+  int16x4_t __s2_113 = __p2_113; \
+  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
+  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
+  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
+  int32x4_t __ret_113; \
+  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
+  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
+  __ret_113; \
 })
 #endif
 
@@ -17962,50 +19257,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
+  int64x2_t __s0_114 = __p0_114; \
+  int32x2_t __s1_114 = __p1_114; \
+  int32x2_t __s2_114 = __p2_114; \
+  int64x2_t __ret_114; \
+  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
+  __ret_114; \
 })
 #else
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
+  int64x2_t __s0_115 = __p0_115; \
+  int32x2_t __s1_115 = __p1_115; \
+  int32x2_t __s2_115 = __p2_115; \
+  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
+  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
+  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
+  int64x2_t __ret_115; \
+  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
+  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
+  __ret_115; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
+  int32x4_t __s0_116 = __p0_116; \
+  int16x4_t __s1_116 = __p1_116; \
+  int16x4_t __s2_116 = __p2_116; \
+  int32x4_t __ret_116; \
+  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
+  __ret_116; \
 })
 #else
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
+  int32x4_t __s0_117 = __p0_117; \
+  int16x4_t __s1_117 = __p1_117; \
+  int16x4_t __s2_117 = __p2_117; \
+  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
+  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
+  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
+  int32x4_t __ret_117; \
+  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
+  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
+  __ret_117; \
 })
 #endif
 
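`vqdmlal_lane_*` and `vqdmlsl_lane_*` (this hunk and the previous one) compute a saturating doubling multiply at double width and then accumulate into, or subtract from, the wide accumulator: r[i] = sat(acc[i] +/- sat(2 * a[i] * b[lane])). The doubling is what makes them the natural primitives for Q15/Q31 fixed-point arithmetic. A worked sketch:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int32_t acc_data[4] = {10, 10, 10, 10};
    int16_t a_data[4]   = {1, 2, 3, 4};
    int16_t v_data[4]   = {5, 6, 7, 8};
    int32x4_t acc = vld1q_s32(acc_data);
    int16x4_t a = vld1_s16(a_data);
    int16x4_t v = vld1_s16(v_data);
    /* r[i] = sat(acc[i] + sat(2 * a[i] * v[0])) = 10 + 2*a[i]*5 */
    int32x4_t r = vqdmlal_lane_s16(acc, a, v, 0);
    int32_t out[4];
    vst1q_s32(out, r);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 20 30 40 50 */
    return 0;
}
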
@@ -18250,44 +19545,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
+  int32x2_t __s0_118 = __p0_118; \
+  int32x2_t __s1_118 = __p1_118; \
+  int64x2_t __ret_118; \
+  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
+  __ret_118; \
 })
 #else
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
+  int32x2_t __s0_119 = __p0_119; \
+  int32x2_t __s1_119 = __p1_119; \
+  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
+  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
+  int64x2_t __ret_119; \
+  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
+  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
+  __ret_119; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
+  int16x4_t __s0_120 = __p0_120; \
+  int16x4_t __s1_120 = __p1_120; \
+  int32x4_t __ret_120; \
+  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
+  __ret_120; \
 })
 #else
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
+  int16x4_t __s0_121 = __p0_121; \
+  int16x4_t __s1_121 = __p1_121; \
+  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
+  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
+  int32x4_t __ret_121; \
+  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
+  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
+  __ret_121; \
 })
 #endif
 
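`vqdmull_lane_*` above is the accumulator-free form: r[i] = sat(2 * a[i] * b[lane]) at double width. Saturation can only trigger in one case, both inputs being the most negative value, since 2 * (-2^15) * (-2^15) = 2^31 exceeds INT32_MAX. A sketch of that edge case:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int16_t a_data[4] = {-32768, 100, 200, 300};
    int16_t v_data[4] = {-32768, 0, 0, 0};
    int16x4_t a = vld1_s16(a_data);
    int16x4_t v = vld1_s16(v_data);
    /* Lane 0 would be 2*(-32768)*(-32768) = 2^31, which overflows
     * s32 and therefore saturates to INT32_MAX. */
    int32x4_t r = vqdmull_lane_s16(a, v, 0);
    printf("%d\n", vgetq_lane_s32(r, 0)); /* 2147483647 */
    return 0;
}
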
@@ -30796,38 +32091,38 @@
 
 #if !defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
+  float16x4_t __s0_122 = __p0_122; \
+  float16x8_t __ret_122; \
+  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
+  __ret_122; \
 })
 #else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
+  float16x4_t __s0_123 = __p0_123; \
+  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
+  float16x8_t __ret_123; \
+  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
+  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_123; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
+  float16x4_t __s0_124 = __p0_124; \
+  float16x4_t __ret_124; \
+  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
+  __ret_124; \
 })
 #else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
+  float16x4_t __s0_125 = __p0_125; \
+  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
+  float16x4_t __ret_125; \
+  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
+  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
+  __ret_125; \
 })
 #endif
 
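`vdup[q]_lane_f16` above broadcasts one half-precision lane across a d or q register; the little-endian form now delegates to `splat[q]_lane_f16` just like the integer variants. A sketch, assuming a toolchain with `__fp16` storage support (for example clang targeting AArch64, or ARMv7-A with NEON-FP16):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    __fp16 data[4] = {1.5f, 2.5f, 3.5f, 4.5f};
    float16x4_t v = vld1_f16(data);
    /* Broadcast lane 2 (3.5) into all eight lanes of a q register. */
    float16x8_t q = vdupq_lane_f16(v, 2);
    __fp16 out[8];
    vst1q_f16(out, q);
    printf("%f\n", (float)out[0]); /* 3.500000 */
    return 0;
}
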
@@ -30900,170 +32195,170 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
+  int32x4_t __s0_126 = __p0_126; \
+  int32x2_t __s1_126 = __p1_126; \
+  int32x4_t __ret_126; \
+  __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
+  __ret_126; \
 })
 #else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
+  int32x4_t __s0_127 = __p0_127; \
+  int32x2_t __s1_127 = __p1_127; \
+  int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
+  int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
+  int32x4_t __ret_127; \
+  __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
+  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
+  __ret_127; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
+  int16x8_t __s0_128 = __p0_128; \
+  int16x4_t __s1_128 = __p1_128; \
+  int16x8_t __ret_128; \
+  __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
+  __ret_128; \
 })
 #else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
+  int16x8_t __s0_129 = __p0_129; \
+  int16x4_t __s1_129 = __p1_129; \
+  int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
+  int16x8_t __ret_129; \
+  __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
+  __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_129; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
+  int32x2_t __s0_130 = __p0_130; \
+  int32x2_t __s1_130 = __p1_130; \
+  int32x2_t __ret_130; \
+  __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
+  __ret_130; \
 })
 #else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
+  int32x2_t __s0_131 = __p0_131; \
+  int32x2_t __s1_131 = __p1_131; \
+  int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
+  int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
+  int32x2_t __ret_131; \
+  __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
+  __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
+  __ret_131; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
+  int16x4_t __s0_132 = __p0_132; \
+  int16x4_t __s1_132 = __p1_132; \
+  int16x4_t __ret_132; \
+  __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
+  __ret_132; \
 })
 #else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
+  int16x4_t __s0_133 = __p0_133; \
+  int16x4_t __s1_133 = __p1_133; \
+  int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
+  int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
+  int16x4_t __ret_133; \
+  __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
+  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
+  __ret_133; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
+  int32x4_t __s0_134 = __p0_134; \
+  int32x2_t __s1_134 = __p1_134; \
+  int32x4_t __ret_134; \
+  __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
+  __ret_134; \
 })
 #else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
+  int32x4_t __s0_135 = __p0_135; \
+  int32x2_t __s1_135 = __p1_135; \
+  int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
+  int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
+  int32x4_t __ret_135; \
+  __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
+  __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
+  __ret_135; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
+  int16x8_t __s0_136 = __p0_136; \
+  int16x4_t __s1_136 = __p1_136; \
+  int16x8_t __ret_136; \
+  __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
+  __ret_136; \
 })
 #else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
+  int16x8_t __s0_137 = __p0_137; \
+  int16x4_t __s1_137 = __p1_137; \
+  int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
+  int16x8_t __ret_137; \
+  __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
+  __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_137; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
+  int32x2_t __s0_138 = __p0_138; \
+  int32x2_t __s1_138 = __p1_138; \
+  int32x2_t __ret_138; \
+  __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
+  __ret_138; \
 })
 #else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
+  int32x2_t __s0_139 = __p0_139; \
+  int32x2_t __s1_139 = __p1_139; \
+  int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
+  int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
+  int32x2_t __ret_139; \
+  __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
+  __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
+  __ret_139; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
+  int16x4_t __s0_140 = __p0_140; \
+  int16x4_t __s1_140 = __p1_140; \
+  int16x4_t __ret_140; \
+  __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
+  __ret_140; \
 })
 #else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
+  int16x4_t __s0_141 = __p0_141; \
+  int16x4_t __s1_141 = __p1_141; \
+  int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
+  int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
+  int16x4_t __ret_141; \
+  __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
+  __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
+  __ret_141; \
 })
 #endif
 
@@ -36714,6 +38009,2357 @@
   return __ret;
 }
 #endif
+#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
+  poly128_t __ret;
+  __ret = (poly128_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
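
The two blocks above add the bfloat16 reinterpret casts guarded by `__ARM_FEATURE_BF16`; the AArch64 block additionally covers the `float64` and `poly128` element types. These are pure bit-pattern casts with no value conversion, which is why every body is a single C cast. A usage sketch, assuming a compiler invoked with bf16 support (for example `-march=armv8.6-a+bf16`):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* bf16 bit patterns for 1.0, 2.0, -3.0, 0.0 (top halves of the
     * corresponding float32 encodings). */
    uint16_t bits[4] = {0x3F80, 0x4000, 0xC040, 0x0000};
    uint16x4_t u = vld1_u16(bits);
    /* Reinterpret: same 64 bits, new element type, no conversion. */
    bfloat16x4_t b = vreinterpret_bf16_u16(u);
    uint16x4_t back = vreinterpret_u16_bf16(b);
    printf("0x%04X\n", (unsigned)vget_lane_u16(back, 0)); /* 0x3F80 */
    return 0;
}
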
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
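`splat[q]_lane[q]_bf16` above are the internal broadcast helpers that the public bf16 lane intrinsics expand to. The trailing constants (11 and 43 here) appear to be clang's internal type discriminators passed to `__builtin_neon_splat*_lane_v`, and the big-endian variants wrap the builtin in the header's usual reverse/operate/reverse shape. A generic sketch of that shape, using plain clang vector extensions rather than the header's internals (the lane index is renumbered by the pre-reversal, which is the point of the pattern):

#include <stdio.h>

typedef int v4 __attribute__((vector_size(16)));

static v4 splat_lane1_swapped(v4 x) {
    v4 rev = __builtin_shufflevector(x, x, 3, 2, 1, 0);     /* memory -> architectural order */
    v4 s = __builtin_shufflevector(rev, rev, 1, 1, 1, 1);   /* splat architectural lane 1 */
    return __builtin_shufflevector(s, s, 3, 2, 1, 0);       /* back to memory order */
}

int main(void) {
    v4 x = {10, 20, 30, 40};
    v4 r = splat_lane1_swapped(x);
    /* Architectural lane 1 of the reversed vector is memory element 2. */
    printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]); /* 30 30 30 30 */
    return 0;
}
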
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
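+/* The _lane/_laneq forms below reuse the plain vbfdot intrinsics: the
+ * bf16 source is reinterpreted as a vector of float32-sized pairs, the
+ * pair selected by the lane index is splatted, and the splat is
+ * reinterpreted back to bf16 before the dot product. */
+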
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
+  float32x4_t __s0_142 = __p0_142; \
+  bfloat16x8_t __s1_142 = __p1_142; \
+  bfloat16x4_t __s2_142 = __p2_142; \
+  float32x4_t __ret_142; \
+bfloat16x4_t __reint_142 = __s2_142; \
+float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
+  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
+  __ret_142; \
+})
+#else
+#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
+  float32x4_t __s0_143 = __p0_143; \
+  bfloat16x8_t __s1_143 = __p1_143; \
+  bfloat16x4_t __s2_143 = __p2_143; \
+  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
+  float32x4_t __ret_143; \
+bfloat16x4_t __reint_143 = __rev2_143; \
+float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
+  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
+  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
+  __ret_143; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
+  float32x2_t __s0_144 = __p0_144; \
+  bfloat16x4_t __s1_144 = __p1_144; \
+  bfloat16x4_t __s2_144 = __p2_144; \
+  float32x2_t __ret_144; \
+bfloat16x4_t __reint_144 = __s2_144; \
+float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
+  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
+  __ret_144; \
+})
+#else
+#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
+  float32x2_t __s0_145 = __p0_145; \
+  bfloat16x4_t __s1_145 = __p1_145; \
+  bfloat16x4_t __s2_145 = __p2_145; \
+  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
+  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
+  float32x2_t __ret_145; \
+bfloat16x4_t __reint_145 = __rev2_145; \
+float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
+  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
+  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
+  __ret_145; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
+  float32x4_t __s0_146 = __p0_146; \
+  bfloat16x8_t __s1_146 = __p1_146; \
+  bfloat16x8_t __s2_146 = __p2_146; \
+  float32x4_t __ret_146; \
+bfloat16x8_t __reint_146 = __s2_146; \
+float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
+  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
+  __ret_146; \
+})
+#else
+#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
+  float32x4_t __s0_147 = __p0_147; \
+  bfloat16x8_t __s1_147 = __p1_147; \
+  bfloat16x8_t __s2_147 = __p2_147; \
+  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_147; \
+bfloat16x8_t __reint_147 = __rev2_147; \
+float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
+  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
+  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
+  __ret_147; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
+  float32x2_t __s0_148 = __p0_148; \
+  bfloat16x4_t __s1_148 = __p1_148; \
+  bfloat16x8_t __s2_148 = __p2_148; \
+  float32x2_t __ret_148; \
+bfloat16x8_t __reint_148 = __s2_148; \
+float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
+  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
+  __ret_148; \
+})
+#else
+#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
+  float32x2_t __s0_149 = __p0_149; \
+  bfloat16x4_t __s1_149 = __p1_149; \
+  bfloat16x8_t __s2_149 = __p2_149; \
+  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
+  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_149; \
+bfloat16x8_t __reint_149 = __rev2_149; \
+float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
+  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
+  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
+  __ret_149; \
+})
+#endif
+
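+/* vbfmlalbq_f32/vbfmlaltq_f32 map to BFMLALB/BFMLALT: the even-numbered
+ * ("bottom") or odd-numbered ("top") bf16 elements of __p1 and __p2 are
+ * widened to float32, multiplied, and accumulated into __p0. */
+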
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
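+/* vbfmmlaq_f32 maps to BFMMLA, the bf16 matrix multiply-accumulate:
+ * the two 128-bit sources are treated as 2x4 and 4x2 bf16 matrices and
+ * their 2x2 float32 product is accumulated into __p0. */
+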
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
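+/* vcombine_bf16 concatenates two 64-bit vectors; the first argument
+ * becomes the low half of the 128-bit result. */
+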
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
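+/* vcreate_bf16 reinterprets the bits of a 64-bit integer as a
+ * bfloat16x4_t; no value conversion takes place. */
+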
+#define vcreate_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (bfloat16x4_t)(__promote); \
+  __ret; \
+})
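+/* bfloat16 is the upper half of an IEEE binary32, so widening to float32
+ * is exact: shift each 16-bit pattern left by 16 and reinterpret, as the
+ * vshll_n_s16-based sequences below do. */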
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
+  float32x4_t __ret_150;
+bfloat16x4_t __reint_150 = __p0_150;
+int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
+  __ret_150 = *(float32x4_t *) &__reint1_150;
+  return __ret_150;
+}
+#else
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
+  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
+  float32x4_t __ret_151;
+bfloat16x4_t __reint_151 = __rev0_151;
+int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
+  __ret_151 = *(float32x4_t *) &__reint1_151;
+  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
+  return __ret_151;
+}
+__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
+  float32x4_t __ret_152;
+bfloat16x4_t __reint_152 = __p0_152;
+int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
+  __ret_152 = *(float32x4_t *) &__reint1_152;
+  return __ret_152;
+}
+#endif
+
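+/* Scalar conversions: vcvtah_f32_bf16 uses the same shift-left-by-16
+ * reinterpretation; the narrowing direction goes through a builtin,
+ * presumably because float32 -> bfloat16 requires rounding. */
+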
+__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
+  float32_t __ret;
+bfloat16_t __reint = __p0;
+int32_t __reint1 = *(int32_t *) &__reint << 16;
+  __ret = *(float32_t *) &__reint1;
+  return __ret;
+}
+__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
+  bfloat16_t __ret;
+  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
+  return __ret;
+}
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
+  bfloat16x4_t __s0_153 = __p0_153; \
+  bfloat16x8_t __ret_153; \
+  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
+  __ret_153; \
+})
+#else
+#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
+  bfloat16x4_t __s0_154 = __p0_154; \
+  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_154; \
+  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
+  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_154; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
+  bfloat16x4_t __s0_155 = __p0_155; \
+  bfloat16x4_t __ret_155; \
+  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
+  __ret_155; \
+})
+#else
+#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
+  bfloat16x4_t __s0_156 = __p0_156; \
+  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_156; \
+  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
+  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
+  __ret_156; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
+  bfloat16x8_t __s0_157 = __p0_157; \
+  bfloat16x8_t __ret_157; \
+  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
+  __ret_157; \
+})
+#else
+#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
+  bfloat16x8_t __s0_158 = __p0_158; \
+  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_158; \
+  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
+  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_158; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
+  bfloat16x8_t __s0_159 = __p0_159; \
+  bfloat16x4_t __ret_159; \
+  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
+  __ret_159; \
+})
+#else
+#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
+  bfloat16x8_t __s0_160 = __p0_160; \
+  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_160; \
+  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
+  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
+  __ret_160; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#endif
+
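+/* Loads: vld1 reads one vector, and the _x2/_x3/_x4 forms further below
+ * read that many consecutive vectors without interleaving.  On big-endian
+ * targets only the results are lane-reversed; the pointer argument is
+ * passed to the builtin as-is. */
+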
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+  __ret; \
+})
+#else
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+  __ret; \
+})
+#else
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
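+/* vld2/vld3/vld4 are de-interleaving structure loads: consecutive memory
+ * elements are distributed round-robin across val[0], val[1], ...  The
+ * _dup forms load one structure and replicate it to every lane; the
+ * _lane forms load into a single lane of the vectors passed in __p1. */
+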
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
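+/* Stores mirror the loads: on big-endian targets the source vectors are
+ * lane-reversed before the builtin is called, and since nothing is
+ * returned there is no swap-back step. */
+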
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
+})
+#else
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
+})
+#else
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+})
+#else
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+})
+#else
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+})
+#else
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+})
+#else
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+})
+#else
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+})
+#else
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+})
+#else
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+})
+#else
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+})
+#endif
+
+#endif
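+/* Usage sketch (illustrative comment, assuming a target with
+ * __ARM_FEATURE_BF16_VECTOR_ARITHMETIC): the vst2/vst3/vst4 "_bf16" macros
+ * above store the selected lane of each member vector interleaved into
+ * memory, e.g.
+ *
+ *   void store_lane3(bfloat16_t *dst, bfloat16x4x4_t quad) {
+ *     vst4_lane_bf16(dst, quad, 3);  // writes quad.val[0..3][3], 8 bytes
+ *   }
+ *
+ * The big-endian variants lane-reverse each member with
+ * __builtin_shufflevector first, so the in-memory layout is unchanged.
+ */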
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __a32_vcvt_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
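+/* Note (illustrative comment): vcombine_bf16(__low, __high) places its
+ * first argument in the low half of the result, so the argument order in
+ * the two AArch32 definitions above looks inverted relative to the ACLE
+ * description (vcvtq_high_bf16_f32 converts into the high half and keeps
+ * the low half of its first operand; vcvtq_low_bf16_f32 converts into the
+ * low half and zeroes the high half); worth checking against newer
+ * generated headers.
+ */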
+
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
+  bfloat16x8_t __s0_161 = __p0_161; \
+  bfloat16x4_t __s2_161 = __p2_161; \
+  bfloat16x8_t __ret_161; \
+  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
+  __ret_161; \
+})
+#else
+#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
+  bfloat16x8_t __s0_162 = __p0_162; \
+  bfloat16x4_t __s2_162 = __p2_162; \
+  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_162; \
+  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
+  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_162; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
+  bfloat16x4_t __s0_163 = __p0_163; \
+  bfloat16x4_t __s2_163 = __p2_163; \
+  bfloat16x4_t __ret_163; \
+  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
+  __ret_163; \
+})
+#else
+#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
+  bfloat16x4_t __s0_164 = __p0_164; \
+  bfloat16x4_t __s2_164 = __p2_164; \
+  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_164; \
+  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
+  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
+  __ret_164; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
+  bfloat16x8_t __s0_165 = __p0_165; \
+  bfloat16x8_t __s2_165 = __p2_165; \
+  bfloat16x8_t __ret_165; \
+  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
+  __ret_165; \
+})
+#else
+#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
+  bfloat16x8_t __s0_166 = __p0_166; \
+  bfloat16x8_t __s2_166 = __p2_166; \
+  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_166; \
+  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
+  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_166; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
+  bfloat16x4_t __s0_167 = __p0_167; \
+  bfloat16x8_t __s2_167 = __p2_167; \
+  bfloat16x4_t __ret_167; \
+  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
+  __ret_167; \
+})
+#else
+#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
+  bfloat16x4_t __s0_168 = __p0_168; \
+  bfloat16x8_t __s2_168 = __p2_168; \
+  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_168; \
+  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
+  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
+  __ret_168; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = __a64_vcvtq_low_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
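+/* Usage sketch (illustrative comment): on AArch64 these conversions are
+ * expected to map to the Armv8.6-A BFCVTN/BFCVTN2 instructions, e.g.
+ *
+ *   bfloat16x8_t narrow_pair(float32x4_t lo, float32x4_t hi) {
+ *     return vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(lo), hi);
+ *   }
+ *
+ * fills the low half of the result from lo and the high half from hi.
+ */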
 #if defined(__ARM_FEATURE_COMPLEX)
 #ifdef __LITTLE_ENDIAN__
 __ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
@@ -36984,228 +40630,228 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_lane_u32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
+  uint32x4_t __s0_169 = __p0_169; \
+  uint8x16_t __s1_169 = __p1_169; \
+  uint8x8_t __s2_169 = __p2_169; \
+  uint32x4_t __ret_169; \
+uint8x8_t __reint_169 = __s2_169; \
+uint32x4_t __reint1_169 = splatq_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169); \
+  __ret_169 = vdotq_u32(__s0_169, __s1_169, *(uint8x16_t *) &__reint1_169); \
+  __ret_169; \
 })
 #else
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_lane_u32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
+  uint32x4_t __s0_170 = __p0_170; \
+  uint8x16_t __s1_170 = __p1_170; \
+  uint8x8_t __s2_170 = __p2_170; \
+  uint32x4_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \
+  uint8x16_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_170; \
+uint8x8_t __reint_170 = __rev2_170; \
+uint32x4_t __reint1_170 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_170, __p3_170); \
+  __ret_170 = __noswap_vdotq_u32(__rev0_170, __rev1_170, *(uint8x16_t *) &__reint1_170); \
+  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 3, 2, 1, 0); \
+  __ret_170; \
 })
 #endif
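+/* Note (illustrative comment): the "_lane" dot-product macros now
+ * broadcast the selected 32-bit group through the shared splat(q)_lane_*
+ * helpers (__noswap_* forms on big-endian) instead of open-coded
+ * __builtin_shufflevector calls, and every macro-local temporary carries a
+ * unique numeric suffix (__s0_169, __rev2_170, ...) so that nested macro
+ * expansions cannot shadow one another. The value computed is unchanged:
+ * the byte quartet is still reinterpreted as a 32-bit lane, splatted, and
+ * cast back to an 8-bit vector.
+ */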
 
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x8_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_lane_s32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
+  int32x4_t __s0_171 = __p0_171; \
+  int8x16_t __s1_171 = __p1_171; \
+  int8x8_t __s2_171 = __p2_171; \
+  int32x4_t __ret_171; \
+int8x8_t __reint_171 = __s2_171; \
+int32x4_t __reint1_171 = splatq_lane_s32(*(int32x2_t *) &__reint_171, __p3_171); \
+  __ret_171 = vdotq_s32(__s0_171, __s1_171, *(int8x16_t *) &__reint1_171); \
+  __ret_171; \
 })
 #else
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_lane_s32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
+  int32x4_t __s0_172 = __p0_172; \
+  int8x16_t __s1_172 = __p1_172; \
+  int8x8_t __s2_172 = __p2_172; \
+  int32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
+  int8x16_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_172; \
+int8x8_t __reint_172 = __rev2_172; \
+int32x4_t __reint1_172 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_172, __p3_172); \
+  __ret_172 = __noswap_vdotq_s32(__rev0_172, __rev1_172, *(int8x16_t *) &__reint1_172); \
+  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
+  __ret_172; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_lane_u32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
+  uint32x2_t __s0_173 = __p0_173; \
+  uint8x8_t __s1_173 = __p1_173; \
+  uint8x8_t __s2_173 = __p2_173; \
+  uint32x2_t __ret_173; \
+uint8x8_t __reint_173 = __s2_173; \
+uint32x2_t __reint1_173 = splat_lane_u32(*(uint32x2_t *) &__reint_173, __p3_173); \
+  __ret_173 = vdot_u32(__s0_173, __s1_173, *(uint8x8_t *) &__reint1_173); \
+  __ret_173; \
 })
 #else
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_lane_u32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
+  uint32x2_t __s0_174 = __p0_174; \
+  uint8x8_t __s1_174 = __p1_174; \
+  uint8x8_t __s2_174 = __p2_174; \
+  uint32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
+  uint8x8_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x2_t __ret_174; \
+uint8x8_t __reint_174 = __rev2_174; \
+uint32x2_t __reint1_174 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174); \
+  __ret_174 = __noswap_vdot_u32(__rev0_174, __rev1_174, *(uint8x8_t *) &__reint1_174); \
+  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
+  __ret_174; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x8_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_lane_s32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
+  int32x2_t __s0_175 = __p0_175; \
+  int8x8_t __s1_175 = __p1_175; \
+  int8x8_t __s2_175 = __p2_175; \
+  int32x2_t __ret_175; \
+int8x8_t __reint_175 = __s2_175; \
+int32x2_t __reint1_175 = splat_lane_s32(*(int32x2_t *) &__reint_175, __p3_175); \
+  __ret_175 = vdot_s32(__s0_175, __s1_175, *(int8x8_t *) &__reint1_175); \
+  __ret_175; \
 })
 #else
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_lane_s32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
+  int32x2_t __s0_176 = __p0_176; \
+  int8x8_t __s1_176 = __p1_176; \
+  int8x8_t __s2_176 = __p2_176; \
+  int32x2_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 1, 0); \
+  int8x8_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_176; \
+int8x8_t __reint_176 = __rev2_176; \
+int32x2_t __reint1_176 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_176, __p3_176); \
+  __ret_176 = __noswap_vdot_s32(__rev0_176, __rev1_176, *(int8x8_t *) &__reint1_176); \
+  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 1, 0); \
+  __ret_176; \
 })
 #endif
 
 #endif
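+/* Usage sketch (illustrative comment): each 32-bit lane accumulates a
+ * four-way 8-bit dot product against the byte quartet selected by the lane
+ * immediate, e.g.
+ *
+ *   uint32x2_t dot_rows(uint32x2_t acc, uint8x8_t a, uint8x8_t b) {
+ *     return vdot_lane_u32(acc, a, b, 0);  // UDOT (vector, by element)
+ *   }
+ */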
 #if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_laneq_u32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
+  uint32x4_t __s0_177 = __p0_177; \
+  uint8x16_t __s1_177 = __p1_177; \
+  uint8x16_t __s2_177 = __p2_177; \
+  uint32x4_t __ret_177; \
+uint8x16_t __reint_177 = __s2_177; \
+uint32x4_t __reint1_177 = splatq_laneq_u32(*(uint32x4_t *) &__reint_177, __p3_177); \
+  __ret_177 = vdotq_u32(__s0_177, __s1_177, *(uint8x16_t *) &__reint1_177); \
+  __ret_177; \
 })
 #else
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_laneq_u32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
+  uint32x4_t __s0_178 = __p0_178; \
+  uint8x16_t __s1_178 = __p1_178; \
+  uint8x16_t __s2_178 = __p2_178; \
+  uint32x4_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 3, 2, 1, 0); \
+  uint8x16_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_178; \
+uint8x16_t __reint_178 = __rev2_178; \
+uint32x4_t __reint1_178 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_178, __p3_178); \
+  __ret_178 = __noswap_vdotq_u32(__rev0_178, __rev1_178, *(uint8x16_t *) &__reint1_178); \
+  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 3, 2, 1, 0); \
+  __ret_178; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x16_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_laneq_s32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
+  int32x4_t __s0_179 = __p0_179; \
+  int8x16_t __s1_179 = __p1_179; \
+  int8x16_t __s2_179 = __p2_179; \
+  int32x4_t __ret_179; \
+int8x16_t __reint_179 = __s2_179; \
+int32x4_t __reint1_179 = splatq_laneq_s32(*(int32x4_t *) &__reint_179, __p3_179); \
+  __ret_179 = vdotq_s32(__s0_179, __s1_179, *(int8x16_t *) &__reint1_179); \
+  __ret_179; \
 })
 #else
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_laneq_s32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
+  int32x4_t __s0_180 = __p0_180; \
+  int8x16_t __s1_180 = __p1_180; \
+  int8x16_t __s2_180 = __p2_180; \
+  int32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
+  int8x16_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_180; \
+int8x16_t __reint_180 = __rev2_180; \
+int32x4_t __reint1_180 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_180, __p3_180); \
+  __ret_180 = __noswap_vdotq_s32(__rev0_180, __rev1_180, *(int8x16_t *) &__reint1_180); \
+  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
+  __ret_180; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_laneq_u32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
+  uint32x2_t __s0_181 = __p0_181; \
+  uint8x8_t __s1_181 = __p1_181; \
+  uint8x16_t __s2_181 = __p2_181; \
+  uint32x2_t __ret_181; \
+uint8x16_t __reint_181 = __s2_181; \
+uint32x2_t __reint1_181 = splat_laneq_u32(*(uint32x4_t *) &__reint_181, __p3_181); \
+  __ret_181 = vdot_u32(__s0_181, __s1_181, *(uint8x8_t *) &__reint1_181); \
+  __ret_181; \
 })
 #else
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_laneq_u32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
+  uint32x2_t __s0_182 = __p0_182; \
+  uint8x8_t __s1_182 = __p1_182; \
+  uint8x16_t __s2_182 = __p2_182; \
+  uint32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
+  uint8x8_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x2_t __ret_182; \
+uint8x16_t __reint_182 = __rev2_182; \
+uint32x2_t __reint1_182 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_182, __p3_182); \
+  __ret_182 = __noswap_vdot_u32(__rev0_182, __rev1_182, *(uint8x8_t *) &__reint1_182); \
+  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
+  __ret_182; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x16_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_laneq_s32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
+  int32x2_t __s0_183 = __p0_183; \
+  int8x8_t __s1_183 = __p1_183; \
+  int8x16_t __s2_183 = __p2_183; \
+  int32x2_t __ret_183; \
+int8x16_t __reint_183 = __s2_183; \
+int32x2_t __reint1_183 = splat_laneq_s32(*(int32x4_t *) &__reint_183, __p3_183); \
+  __ret_183 = vdot_s32(__s0_183, __s1_183, *(int8x8_t *) &__reint1_183); \
+  __ret_183; \
 })
 #else
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_laneq_s32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
+  int32x2_t __s0_184 = __p0_184; \
+  int8x8_t __s1_184 = __p1_184; \
+  int8x16_t __s2_184 = __p2_184; \
+  int32x2_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 1, 0); \
+  int8x8_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_184; \
+int8x16_t __reint_184 = __rev2_184; \
+int32x2_t __reint1_184 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_184, __p3_184); \
+  __ret_184 = __noswap_vdot_s32(__rev0_184, __rev1_184, *(int8x8_t *) &__reint1_184); \
+  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 1, 0); \
+  __ret_184; \
 })
 #endif
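+/* Note (illustrative comment): the AArch64-only "_laneq" forms above
+ * match the "_lane" forms except that the byte quartet is selected from a
+ * 128-bit vector, so the lane immediate ranges over 0-3 rather than 0-1.
+ */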
 
@@ -38872,44 +42518,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_f16(__p0_185, __p1_185, __p2_185) __extension__ ({ \
+  float16x8_t __s0_185 = __p0_185; \
+  float16x4_t __s1_185 = __p1_185; \
+  float16x8_t __ret_185; \
+  __ret_185 = __s0_185 * splatq_lane_f16(__s1_185, __p2_185); \
+  __ret_185; \
 })
 #else
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_f16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
+  float16x8_t __s0_186 = __p0_186; \
+  float16x4_t __s1_186 = __p1_186; \
+  float16x8_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 3, 2, 1, 0); \
+  float16x8_t __ret_186; \
+  __ret_186 = __rev0_186 * __noswap_splatq_lane_f16(__rev1_186, __p2_186); \
+  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_186; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_lane_f16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
+  float16x4_t __s0_187 = __p0_187; \
+  float16x4_t __s1_187 = __p1_187; \
+  float16x4_t __ret_187; \
+  __ret_187 = __s0_187 * splat_lane_f16(__s1_187, __p2_187); \
+  __ret_187; \
 })
 #else
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_lane_f16(__p0_188, __p1_188, __p2_188) __extension__ ({ \
+  float16x4_t __s0_188 = __p0_188; \
+  float16x4_t __s1_188 = __p1_188; \
+  float16x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
+  float16x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
+  float16x4_t __ret_188; \
+  __ret_188 = __rev0_188 * __noswap_splat_lane_f16(__rev1_188, __p2_188); \
+  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
+  __ret_188; \
 })
 #endif
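+/* Usage sketch (illustrative comment): the fp16 multiply-by-lane macros
+ * now broadcast through splat(q)_lane_f16 but compute the same product,
+ * e.g.
+ *
+ *   float16x8_t scale_by_lane1(float16x8_t v, float16x4_t s) {
+ *     return vmulq_lane_f16(v, s, 1);  // v * s[1] in every lane
+ *   }
+ */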
 
@@ -39651,140 +43297,140 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
-  float16_t __s0_0 = __p0_0; \
-  float16_t __s1_0 = __p1_0; \
-  float16x4_t __s2_0 = __p2_0; \
-  float16_t __ret_0; \
-  __ret_0 = vfmah_lane_f16(__s0_0, -__s1_0, __s2_0, __p3_0); \
-  __ret_0; \
+#define vfmsh_lane_f16(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
+  float16_t __s0_189 = __p0_189; \
+  float16_t __s1_189 = __p1_189; \
+  float16x4_t __s2_189 = __p2_189; \
+  float16_t __ret_189; \
+  __ret_189 = vfmah_lane_f16(__s0_189, -__s1_189, __s2_189, __p3_189); \
+  __ret_189; \
 })
 #else
-#define vfmsh_lane_f16(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
-  float16_t __s0_1 = __p0_1; \
-  float16_t __s1_1 = __p1_1; \
-  float16x4_t __s2_1 = __p2_1; \
-  float16x4_t __rev2_1;  __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 3, 2, 1, 0); \
-  float16_t __ret_1; \
-  __ret_1 = __noswap_vfmah_lane_f16(__s0_1, -__s1_1, __rev2_1, __p3_1); \
-  __ret_1; \
+#define vfmsh_lane_f16(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
+  float16_t __s0_190 = __p0_190; \
+  float16_t __s1_190 = __p1_190; \
+  float16x4_t __s2_190 = __p2_190; \
+  float16x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
+  float16_t __ret_190; \
+  __ret_190 = __noswap_vfmah_lane_f16(__s0_190, -__s1_190, __rev2_190, __p3_190); \
+  __ret_190; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
-  float16x8_t __s0_2 = __p0_2; \
-  float16x8_t __s1_2 = __p1_2; \
-  float16x4_t __s2_2 = __p2_2; \
-  float16x8_t __ret_2; \
-  __ret_2 = vfmaq_lane_f16(__s0_2, -__s1_2, __s2_2, __p3_2); \
-  __ret_2; \
+#define vfmsq_lane_f16(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
+  float16x8_t __s0_191 = __p0_191; \
+  float16x8_t __s1_191 = __p1_191; \
+  float16x4_t __s2_191 = __p2_191; \
+  float16x8_t __ret_191; \
+  __ret_191 = vfmaq_lane_f16(__s0_191, -__s1_191, __s2_191, __p3_191); \
+  __ret_191; \
 })
 #else
-#define vfmsq_lane_f16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
-  float16x8_t __s0_3 = __p0_3; \
-  float16x8_t __s1_3 = __p1_3; \
-  float16x4_t __s2_3 = __p2_3; \
-  float16x8_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_3;  __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_3;  __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
-  float16x8_t __ret_3; \
-  __ret_3 = __noswap_vfmaq_lane_f16(__rev0_3, -__rev1_3, __rev2_3, __p3_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_3; \
+#define vfmsq_lane_f16(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
+  float16x8_t __s0_192 = __p0_192; \
+  float16x8_t __s1_192 = __p1_192; \
+  float16x4_t __s2_192 = __p2_192; \
+  float16x8_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
+  float16x8_t __ret_192; \
+  __ret_192 = __noswap_vfmaq_lane_f16(__rev0_192, -__rev1_192, __rev2_192, __p3_192); \
+  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_192; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
-  float16x4_t __s0_4 = __p0_4; \
-  float16x4_t __s1_4 = __p1_4; \
-  float16x4_t __s2_4 = __p2_4; \
-  float16x4_t __ret_4; \
-  __ret_4 = vfma_lane_f16(__s0_4, -__s1_4, __s2_4, __p3_4); \
-  __ret_4; \
+#define vfms_lane_f16(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
+  float16x4_t __s0_193 = __p0_193; \
+  float16x4_t __s1_193 = __p1_193; \
+  float16x4_t __s2_193 = __p2_193; \
+  float16x4_t __ret_193; \
+  __ret_193 = vfma_lane_f16(__s0_193, -__s1_193, __s2_193, __p3_193); \
+  __ret_193; \
 })
 #else
-#define vfms_lane_f16(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
-  float16x4_t __s0_5 = __p0_5; \
-  float16x4_t __s1_5 = __p1_5; \
-  float16x4_t __s2_5 = __p2_5; \
-  float16x4_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 3, 2, 1, 0); \
-  float16x4_t __rev1_5;  __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
-  float16x4_t __rev2_5;  __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 3, 2, 1, 0); \
-  float16x4_t __ret_5; \
-  __ret_5 = __noswap_vfma_lane_f16(__rev0_5, -__rev1_5, __rev2_5, __p3_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 3, 2, 1, 0); \
-  __ret_5; \
+#define vfms_lane_f16(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
+  float16x4_t __s0_194 = __p0_194; \
+  float16x4_t __s1_194 = __p1_194; \
+  float16x4_t __s2_194 = __p2_194; \
+  float16x4_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 3, 2, 1, 0); \
+  float16x4_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 3, 2, 1, 0); \
+  float16x4_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 3, 2, 1, 0); \
+  float16x4_t __ret_194; \
+  __ret_194 = __noswap_vfma_lane_f16(__rev0_194, -__rev1_194, __rev2_194, __p3_194); \
+  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 3, 2, 1, 0); \
+  __ret_194; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
-  float16_t __s0_6 = __p0_6; \
-  float16_t __s1_6 = __p1_6; \
-  float16x8_t __s2_6 = __p2_6; \
-  float16_t __ret_6; \
-  __ret_6 = vfmah_laneq_f16(__s0_6, -__s1_6, __s2_6, __p3_6); \
-  __ret_6; \
+#define vfmsh_laneq_f16(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
+  float16_t __s0_195 = __p0_195; \
+  float16_t __s1_195 = __p1_195; \
+  float16x8_t __s2_195 = __p2_195; \
+  float16_t __ret_195; \
+  __ret_195 = vfmah_laneq_f16(__s0_195, -__s1_195, __s2_195, __p3_195); \
+  __ret_195; \
 })
 #else
-#define vfmsh_laneq_f16(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
-  float16_t __s0_7 = __p0_7; \
-  float16_t __s1_7 = __p1_7; \
-  float16x8_t __s2_7 = __p2_7; \
-  float16x8_t __rev2_7;  __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_7; \
-  __ret_7 = __noswap_vfmah_laneq_f16(__s0_7, -__s1_7, __rev2_7, __p3_7); \
-  __ret_7; \
+#define vfmsh_laneq_f16(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
+  float16_t __s0_196 = __p0_196; \
+  float16_t __s1_196 = __p1_196; \
+  float16x8_t __s2_196 = __p2_196; \
+  float16x8_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_196; \
+  __ret_196 = __noswap_vfmah_laneq_f16(__s0_196, -__s1_196, __rev2_196, __p3_196); \
+  __ret_196; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
-  float16x8_t __s0_8 = __p0_8; \
-  float16x8_t __s1_8 = __p1_8; \
-  float16x8_t __s2_8 = __p2_8; \
-  float16x8_t __ret_8; \
-  __ret_8 = vfmaq_laneq_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
-  __ret_8; \
+#define vfmsq_laneq_f16(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
+  float16x8_t __s0_197 = __p0_197; \
+  float16x8_t __s1_197 = __p1_197; \
+  float16x8_t __s2_197 = __p2_197; \
+  float16x8_t __ret_197; \
+  __ret_197 = vfmaq_laneq_f16(__s0_197, -__s1_197, __s2_197, __p3_197); \
+  __ret_197; \
 })
 #else
-#define vfmsq_laneq_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
-  float16x8_t __s0_9 = __p0_9; \
-  float16x8_t __s1_9 = __p1_9; \
-  float16x8_t __s2_9 = __p2_9; \
-  float16x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_9;  __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_9;  __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_9; \
-  __ret_9 = __noswap_vfmaq_laneq_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
+#define vfmsq_laneq_f16(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
+  float16x8_t __s0_198 = __p0_198; \
+  float16x8_t __s1_198 = __p1_198; \
+  float16x8_t __s2_198 = __p2_198; \
+  float16x8_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_198; \
+  __ret_198 = __noswap_vfmaq_laneq_f16(__rev0_198, -__rev1_198, __rev2_198, __p3_198); \
+  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_198; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
-  float16x4_t __s0_10 = __p0_10; \
-  float16x4_t __s1_10 = __p1_10; \
-  float16x8_t __s2_10 = __p2_10; \
-  float16x4_t __ret_10; \
-  __ret_10 = vfma_laneq_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
-  __ret_10; \
+#define vfms_laneq_f16(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
+  float16x4_t __s0_199 = __p0_199; \
+  float16x4_t __s1_199 = __p1_199; \
+  float16x8_t __s2_199 = __p2_199; \
+  float16x4_t __ret_199; \
+  __ret_199 = vfma_laneq_f16(__s0_199, -__s1_199, __s2_199, __p3_199); \
+  __ret_199; \
 })
 #else
-#define vfms_laneq_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
-  float16x4_t __s0_11 = __p0_11; \
-  float16x4_t __s1_11 = __p1_11; \
-  float16x8_t __s2_11 = __p2_11; \
-  float16x4_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
-  float16x4_t __rev1_11;  __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
-  float16x8_t __rev2_11;  __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_11; \
-  __ret_11 = __noswap_vfma_laneq_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
+#define vfms_laneq_f16(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
+  float16x4_t __s0_200 = __p0_200; \
+  float16x4_t __s1_200 = __p1_200; \
+  float16x8_t __s2_200 = __p2_200; \
+  float16x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
+  float16x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
+  float16x8_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_200; \
+  __ret_200 = __noswap_vfma_laneq_f16(__rev0_200, -__rev1_200, __rev2_200, __p3_200); \
+  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
+  __ret_200; \
 })
 #endif
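+/* Note (illustrative comment): the vfms* lane forms negate the second
+ * multiplicand and forward to the matching vfma* form, i.e. a fused
+ * multiply-subtract with a single rounding, e.g.
+ *
+ *   float16x4_t fms(float16x4_t d, float16x4_t a, float16x4_t b) {
+ *     return vfms_lane_f16(d, a, b, 2);  // d - a * b[2]
+ *   }
+ */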
 
@@ -39971,44 +43617,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_f16(__p0_201, __p1_201, __p2_201) __extension__ ({ \
+  float16x8_t __s0_201 = __p0_201; \
+  float16x8_t __s1_201 = __p1_201; \
+  float16x8_t __ret_201; \
+  __ret_201 = __s0_201 * splatq_laneq_f16(__s1_201, __p2_201); \
+  __ret_201; \
 })
 #else
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_f16(__p0_202, __p1_202, __p2_202) __extension__ ({ \
+  float16x8_t __s0_202 = __p0_202; \
+  float16x8_t __s1_202 = __p1_202; \
+  float16x8_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_202; \
+  __ret_202 = __rev0_202 * __noswap_splatq_laneq_f16(__rev1_202, __p2_202); \
+  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_202; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_f16(__p0_203, __p1_203, __p2_203) __extension__ ({ \
+  float16x4_t __s0_203 = __p0_203; \
+  float16x8_t __s1_203 = __p1_203; \
+  float16x4_t __ret_203; \
+  __ret_203 = __s0_203 * splat_laneq_f16(__s1_203, __p2_203); \
+  __ret_203; \
 })
 #else
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_laneq_f16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
+  float16x4_t __s0_204 = __p0_204; \
+  float16x8_t __s1_204 = __p1_204; \
+  float16x4_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \
+  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_204; \
+  __ret_204 = __rev0_204 * __noswap_splat_laneq_f16(__rev1_204, __p2_204); \
+  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \
+  __ret_204; \
 })
 #endif
 
@@ -40076,44 +43722,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_lane_f16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
+  float16x8_t __s0_205 = __p0_205; \
+  float16x4_t __s1_205 = __p1_205; \
+  float16x8_t __ret_205; \
+  __ret_205 = vmulxq_f16(__s0_205, splatq_lane_f16(__s1_205, __p2_205)); \
+  __ret_205; \
 })
 #else
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_lane_f16(__p0_206, __p1_206, __p2_206) __extension__ ({ \
+  float16x8_t __s0_206 = __p0_206; \
+  float16x4_t __s1_206 = __p1_206; \
+  float16x8_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
+  float16x8_t __ret_206; \
+  __ret_206 = __noswap_vmulxq_f16(__rev0_206, __noswap_splatq_lane_f16(__rev1_206, __p2_206)); \
+  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_206; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulx_lane_f16(__p0_207, __p1_207, __p2_207) __extension__ ({ \
+  float16x4_t __s0_207 = __p0_207; \
+  float16x4_t __s1_207 = __p1_207; \
+  float16x4_t __ret_207; \
+  __ret_207 = vmulx_f16(__s0_207, splat_lane_f16(__s1_207, __p2_207)); \
+  __ret_207; \
 })
 #else
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulx_lane_f16(__p0_208, __p1_208, __p2_208) __extension__ ({ \
+  float16x4_t __s0_208 = __p0_208; \
+  float16x4_t __s1_208 = __p1_208; \
+  float16x4_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \
+  float16x4_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \
+  float16x4_t __ret_208; \
+  __ret_208 = __noswap_vmulx_f16(__rev0_208, __noswap_splat_lane_f16(__rev1_208, __p2_208)); \
+  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \
+  __ret_208; \
 })
 #endif
 
@@ -40137,44 +43783,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_laneq_f16(__p0_209, __p1_209, __p2_209) __extension__ ({ \
+  float16x8_t __s0_209 = __p0_209; \
+  float16x8_t __s1_209 = __p1_209; \
+  float16x8_t __ret_209; \
+  __ret_209 = vmulxq_f16(__s0_209, splatq_laneq_f16(__s1_209, __p2_209)); \
+  __ret_209; \
 })
 #else
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_laneq_f16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
+  float16x8_t __s0_210 = __p0_210; \
+  float16x8_t __s1_210 = __p1_210; \
+  float16x8_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_210; \
+  __ret_210 = __noswap_vmulxq_f16(__rev0_210, __noswap_splatq_laneq_f16(__rev1_210, __p2_210)); \
+  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_210; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulx_laneq_f16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
+  float16x4_t __s0_211 = __p0_211; \
+  float16x8_t __s1_211 = __p1_211; \
+  float16x4_t __ret_211; \
+  __ret_211 = vmulx_f16(__s0_211, splat_laneq_f16(__s1_211, __p2_211)); \
+  __ret_211; \
 })
 #else
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulx_laneq_f16(__p0_212, __p1_212, __p2_212) __extension__ ({ \
+  float16x4_t __s0_212 = __p0_212; \
+  float16x8_t __s1_212 = __p1_212; \
+  float16x4_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 3, 2, 1, 0); \
+  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_212; \
+  __ret_212 = __noswap_vmulx_f16(__rev0_212, __noswap_splat_laneq_f16(__rev1_212, __p2_212)); \
+  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 3, 2, 1, 0); \
+  __ret_212; \
 })
 #endif
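+/* Note (illustrative comment): vmulx* maps to FMULX, which behaves like an
+ * ordinary floating-point multiply except that (+/-0) * (+/-inf) returns
+ * +/-2.0 instead of NaN; the rewrite above only renames temporaries and
+ * routes the broadcast through splat(q)_lane(q)_f16.
+ */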
 
@@ -40606,6 +44252,160 @@
 #endif
 
 #endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
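+/* Usage sketch (illustrative comment): the I8MM matrix intrinsics treat
+ * their 8-bit operands as a 2x8 and an 8x2 matrix and accumulate the 2x2
+ * 32-bit product into the accumulator (SMMLA/UMMLA), e.g.
+ *
+ *   int32x4_t mm_step(int32x4_t acc, int8x16_t a, int8x16_t b) {
+ *     return vmmlaq_s32(acc, a, b);  // acc += A(2x8) * B(8x2)
+ *   }
+ */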
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdotq_lane_s32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
+  int32x4_t __s0_213 = __p0_213; \
+  uint8x16_t __s1_213 = __p1_213; \
+  int8x8_t __s2_213 = __p2_213; \
+  int32x4_t __ret_213; \
+  int8x8_t __reint_213 = __s2_213; \
+  __ret_213 = vusdotq_s32(__s0_213, __s1_213, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_213, __p3_213))); \
+  __ret_213; \
+})
+#else
+#define vusdotq_lane_s32(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
+  int32x4_t __s0_214 = __p0_214; \
+  uint8x16_t __s1_214 = __p1_214; \
+  int8x8_t __s2_214 = __p2_214; \
+  int32x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
+  uint8x16_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_214; \
+  int8x8_t __reint_214 = __rev2_214; \
+  __ret_214 = __noswap_vusdotq_s32(__rev0_214, __rev1_214, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_214, __p3_214))); \
+  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
+  __ret_214; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdot_lane_s32(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
+  int32x2_t __s0_215 = __p0_215; \
+  uint8x8_t __s1_215 = __p1_215; \
+  int8x8_t __s2_215 = __p2_215; \
+  int32x2_t __ret_215; \
+  int8x8_t __reint_215 = __s2_215; \
+  __ret_215 = vusdot_s32(__s0_215, __s1_215, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_215, __p3_215))); \
+  __ret_215; \
+})
+#else
+#define vusdot_lane_s32(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
+  int32x2_t __s0_216 = __p0_216; \
+  uint8x8_t __s1_216 = __p1_216; \
+  int8x8_t __s2_216 = __p2_216; \
+  int32x2_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 1, 0); \
+  uint8x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_216; \
+  int8x8_t __reint_216 = __rev2_216; \
+  __ret_216 = __noswap_vusdot_s32(__rev0_216, __rev1_216, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_216, __p3_216))); \
+  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 1, 0); \
+  __ret_216; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
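/*
 * Illustrative use of the __ARM_FEATURE_MATMUL_INT8 intrinsics added above
 * (a sketch, not part of the generated header; acc_i8_block is a
 * hypothetical helper and assumes an AArch64 target built with +i8mm).
 * SMMLA treats each 128-bit source as a row-major 2x8 int8 matrix and
 * accumulates the 2x2 int32 product acc += a * transpose(b), one int32
 * dot product per result lane.
 */
#if defined(__ARM_FEATURE_MATMUL_INT8)
static inline int32x4_t acc_i8_block(int32x4_t acc, int8x16_t a, int8x16_t b) {
  return vmmlaq_s32(acc, a, b); /* four int32 lanes = 2x2 result matrix */
}
#endif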
 #if defined(__ARM_FEATURE_QRDMX)
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
@@ -40680,98 +44480,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_lane_s32(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
+  int32x4_t __s0_217 = __p0_217; \
+  int32x4_t __s1_217 = __p1_217; \
+  int32x2_t __s2_217 = __p2_217; \
+  int32x4_t __ret_217; \
+  __ret_217 = vqaddq_s32(__s0_217, vqrdmulhq_s32(__s1_217, splatq_lane_s32(__s2_217, __p3_217))); \
+  __ret_217; \
 })
 #else
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_lane_s32(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
+  int32x4_t __s0_218 = __p0_218; \
+  int32x4_t __s1_218 = __p1_218; \
+  int32x2_t __s2_218 = __p2_218; \
+  int32x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
+  int32x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
+  int32x2_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 1, 0); \
+  int32x4_t __ret_218; \
+  __ret_218 = __noswap_vqaddq_s32(__rev0_218, __noswap_vqrdmulhq_s32(__rev1_218, __noswap_splatq_lane_s32(__rev2_218, __p3_218))); \
+  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
+  __ret_218; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_lane_s16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
+  int16x8_t __s0_219 = __p0_219; \
+  int16x8_t __s1_219 = __p1_219; \
+  int16x4_t __s2_219 = __p2_219; \
+  int16x8_t __ret_219; \
+  __ret_219 = vqaddq_s16(__s0_219, vqrdmulhq_s16(__s1_219, splatq_lane_s16(__s2_219, __p3_219))); \
+  __ret_219; \
 })
 #else
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_lane_s16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
+  int16x8_t __s0_220 = __p0_220; \
+  int16x8_t __s1_220 = __p1_220; \
+  int16x4_t __s2_220 = __p2_220; \
+  int16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
+  int16x8_t __ret_220; \
+  __ret_220 = __noswap_vqaddq_s16(__rev0_220, __noswap_vqrdmulhq_s16(__rev1_220, __noswap_splatq_lane_s16(__rev2_220, __p3_220))); \
+  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_220; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_lane_s32(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
+  int32x2_t __s0_221 = __p0_221; \
+  int32x2_t __s1_221 = __p1_221; \
+  int32x2_t __s2_221 = __p2_221; \
+  int32x2_t __ret_221; \
+  __ret_221 = vqadd_s32(__s0_221, vqrdmulh_s32(__s1_221, splat_lane_s32(__s2_221, __p3_221))); \
+  __ret_221; \
 })
 #else
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlah_lane_s32(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
+  int32x2_t __s0_222 = __p0_222; \
+  int32x2_t __s1_222 = __p1_222; \
+  int32x2_t __s2_222 = __p2_222; \
+  int32x2_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 1, 0); \
+  int32x2_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 1, 0); \
+  int32x2_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 1, 0); \
+  int32x2_t __ret_222; \
+  __ret_222 = __noswap_vqadd_s32(__rev0_222, __noswap_vqrdmulh_s32(__rev1_222, __noswap_splat_lane_s32(__rev2_222, __p3_222))); \
+  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 1, 0); \
+  __ret_222; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_lane_s16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
+  int16x4_t __s0_223 = __p0_223; \
+  int16x4_t __s1_223 = __p1_223; \
+  int16x4_t __s2_223 = __p2_223; \
+  int16x4_t __ret_223; \
+  __ret_223 = vqadd_s16(__s0_223, vqrdmulh_s16(__s1_223, splat_lane_s16(__s2_223, __p3_223))); \
+  __ret_223; \
 })
 #else
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlah_lane_s16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
+  int16x4_t __s0_224 = __p0_224; \
+  int16x4_t __s1_224 = __p1_224; \
+  int16x4_t __s2_224 = __p2_224; \
+  int16x4_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 3, 2, 1, 0); \
+  int16x4_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 3, 2, 1, 0); \
+  int16x4_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 3, 2, 1, 0); \
+  int16x4_t __ret_224; \
+  __ret_224 = __noswap_vqadd_s16(__rev0_224, __noswap_vqrdmulh_s16(__rev1_224, __noswap_splat_lane_s16(__rev2_224, __p3_224))); \
+  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 3, 2, 1, 0); \
+  __ret_224; \
 })
 #endif
 
@@ -40848,292 +44648,292 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_lane_s32(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
+  int32x4_t __s0_225 = __p0_225; \
+  int32x4_t __s1_225 = __p1_225; \
+  int32x2_t __s2_225 = __p2_225; \
+  int32x4_t __ret_225; \
+  __ret_225 = vqsubq_s32(__s0_225, vqrdmulhq_s32(__s1_225, splatq_lane_s32(__s2_225, __p3_225))); \
+  __ret_225; \
 })
 #else
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_lane_s32(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
+  int32x4_t __s0_226 = __p0_226; \
+  int32x4_t __s1_226 = __p1_226; \
+  int32x2_t __s2_226 = __p2_226; \
+  int32x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
+  int32x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
+  int32x2_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 1, 0); \
+  int32x4_t __ret_226; \
+  __ret_226 = __noswap_vqsubq_s32(__rev0_226, __noswap_vqrdmulhq_s32(__rev1_226, __noswap_splatq_lane_s32(__rev2_226, __p3_226))); \
+  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
+  __ret_226; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_lane_s16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
+  int16x8_t __s0_227 = __p0_227; \
+  int16x8_t __s1_227 = __p1_227; \
+  int16x4_t __s2_227 = __p2_227; \
+  int16x8_t __ret_227; \
+  __ret_227 = vqsubq_s16(__s0_227, vqrdmulhq_s16(__s1_227, splatq_lane_s16(__s2_227, __p3_227))); \
+  __ret_227; \
 })
 #else
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_lane_s16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
+  int16x8_t __s0_228 = __p0_228; \
+  int16x8_t __s1_228 = __p1_228; \
+  int16x4_t __s2_228 = __p2_228; \
+  int16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
+  int16x8_t __ret_228; \
+  __ret_228 = __noswap_vqsubq_s16(__rev0_228, __noswap_vqrdmulhq_s16(__rev1_228, __noswap_splatq_lane_s16(__rev2_228, __p3_228))); \
+  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_228; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_lane_s32(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
+  int32x2_t __s0_229 = __p0_229; \
+  int32x2_t __s1_229 = __p1_229; \
+  int32x2_t __s2_229 = __p2_229; \
+  int32x2_t __ret_229; \
+  __ret_229 = vqsub_s32(__s0_229, vqrdmulh_s32(__s1_229, splat_lane_s32(__s2_229, __p3_229))); \
+  __ret_229; \
 })
 #else
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlsh_lane_s32(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
+  int32x2_t __s0_230 = __p0_230; \
+  int32x2_t __s1_230 = __p1_230; \
+  int32x2_t __s2_230 = __p2_230; \
+  int32x2_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 1, 0); \
+  int32x2_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 1, 0); \
+  int32x2_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 1, 0); \
+  int32x2_t __ret_230; \
+  __ret_230 = __noswap_vqsub_s32(__rev0_230, __noswap_vqrdmulh_s32(__rev1_230, __noswap_splat_lane_s32(__rev2_230, __p3_230))); \
+  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 1, 0); \
+  __ret_230; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_lane_s16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
+  int16x4_t __s0_231 = __p0_231; \
+  int16x4_t __s1_231 = __p1_231; \
+  int16x4_t __s2_231 = __p2_231; \
+  int16x4_t __ret_231; \
+  __ret_231 = vqsub_s16(__s0_231, vqrdmulh_s16(__s1_231, splat_lane_s16(__s2_231, __p3_231))); \
+  __ret_231; \
 })
 #else
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlsh_lane_s16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
+  int16x4_t __s0_232 = __p0_232; \
+  int16x4_t __s1_232 = __p1_232; \
+  int16x4_t __s2_232 = __p2_232; \
+  int16x4_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 3, 2, 1, 0); \
+  int16x4_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 3, 2, 1, 0); \
+  int16x4_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 3, 2, 1, 0); \
+  int16x4_t __ret_232; \
+  __ret_232 = __noswap_vqsub_s16(__rev0_232, __noswap_vqrdmulh_s16(__rev1_232, __noswap_splat_lane_s16(__rev2_232, __p3_232))); \
+  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 3, 2, 1, 0); \
+  __ret_232; \
 })
 #endif
 
 #endif
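/*
 * Illustrative use of the __ARM_FEATURE_QRDMX lane macros above (a sketch,
 * not part of the generated header; q31_mac is a hypothetical helper).
 * As the expansions show, vqrdmlahq_lane_s32 is a saturating
 * rounding-doubling multiply-high by one lane of the third operand,
 * saturatingly added into the first -- i.e. a Q31 fixed-point
 * multiply-accumulate.
 */
#if defined(__ARM_FEATURE_QRDMX)
static inline int32x4_t q31_mac(int32x4_t acc, int32x4_t x, int32x2_t coef) {
  return vqrdmlahq_lane_s32(acc, x, coef, 1); /* acc = sat(acc + x*coef[1]) */
}
#endif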
 #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_laneq_s32(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
+  int32x4_t __s0_233 = __p0_233; \
+  int32x4_t __s1_233 = __p1_233; \
+  int32x4_t __s2_233 = __p2_233; \
+  int32x4_t __ret_233; \
+  __ret_233 = vqaddq_s32(__s0_233, vqrdmulhq_s32(__s1_233, splatq_laneq_s32(__s2_233, __p3_233))); \
+  __ret_233; \
 })
 #else
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_laneq_s32(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
+  int32x4_t __s0_234 = __p0_234; \
+  int32x4_t __s1_234 = __p1_234; \
+  int32x4_t __s2_234 = __p2_234; \
+  int32x4_t __rev0_234;  __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \
+  int32x4_t __rev1_234;  __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \
+  int32x4_t __rev2_234;  __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \
+  int32x4_t __ret_234; \
+  __ret_234 = __noswap_vqaddq_s32(__rev0_234, __noswap_vqrdmulhq_s32(__rev1_234, __noswap_splatq_laneq_s32(__rev2_234, __p3_234))); \
+  __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \
+  __ret_234; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_laneq_s16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
+  int16x8_t __s0_235 = __p0_235; \
+  int16x8_t __s1_235 = __p1_235; \
+  int16x8_t __s2_235 = __p2_235; \
+  int16x8_t __ret_235; \
+  __ret_235 = vqaddq_s16(__s0_235, vqrdmulhq_s16(__s1_235, splatq_laneq_s16(__s2_235, __p3_235))); \
+  __ret_235; \
 })
 #else
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_laneq_s16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
+  int16x8_t __s0_236 = __p0_236; \
+  int16x8_t __s1_236 = __p1_236; \
+  int16x8_t __s2_236 = __p2_236; \
+  int16x8_t __rev0_236;  __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_236;  __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_236;  __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_236; \
+  __ret_236 = __noswap_vqaddq_s16(__rev0_236, __noswap_vqrdmulhq_s16(__rev1_236, __noswap_splatq_laneq_s16(__rev2_236, __p3_236))); \
+  __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_236; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_laneq_s32(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
+  int32x2_t __s0_237 = __p0_237; \
+  int32x2_t __s1_237 = __p1_237; \
+  int32x4_t __s2_237 = __p2_237; \
+  int32x2_t __ret_237; \
+  __ret_237 = vqadd_s32(__s0_237, vqrdmulh_s32(__s1_237, splat_laneq_s32(__s2_237, __p3_237))); \
+  __ret_237; \
 })
 #else
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlah_laneq_s32(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
+  int32x2_t __s0_238 = __p0_238; \
+  int32x2_t __s1_238 = __p1_238; \
+  int32x4_t __s2_238 = __p2_238; \
+  int32x2_t __rev0_238;  __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 1, 0); \
+  int32x2_t __rev1_238;  __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 1, 0); \
+  int32x4_t __rev2_238;  __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 3, 2, 1, 0); \
+  int32x2_t __ret_238; \
+  __ret_238 = __noswap_vqadd_s32(__rev0_238, __noswap_vqrdmulh_s32(__rev1_238, __noswap_splat_laneq_s32(__rev2_238, __p3_238))); \
+  __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 1, 0); \
+  __ret_238; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_laneq_s16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
+  int16x4_t __s0_239 = __p0_239; \
+  int16x4_t __s1_239 = __p1_239; \
+  int16x8_t __s2_239 = __p2_239; \
+  int16x4_t __ret_239; \
+  __ret_239 = vqadd_s16(__s0_239, vqrdmulh_s16(__s1_239, splat_laneq_s16(__s2_239, __p3_239))); \
+  __ret_239; \
 })
 #else
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlah_laneq_s16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
+  int16x4_t __s0_240 = __p0_240; \
+  int16x4_t __s1_240 = __p1_240; \
+  int16x8_t __s2_240 = __p2_240; \
+  int16x4_t __rev0_240;  __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 3, 2, 1, 0); \
+  int16x4_t __rev1_240;  __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 3, 2, 1, 0); \
+  int16x8_t __rev2_240;  __rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_240; \
+  __ret_240 = __noswap_vqadd_s16(__rev0_240, __noswap_vqrdmulh_s16(__rev1_240, __noswap_splat_laneq_s16(__rev2_240, __p3_240))); \
+  __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 3, 2, 1, 0); \
+  __ret_240; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_laneq_s32(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
+  int32x4_t __s0_241 = __p0_241; \
+  int32x4_t __s1_241 = __p1_241; \
+  int32x4_t __s2_241 = __p2_241; \
+  int32x4_t __ret_241; \
+  __ret_241 = vqsubq_s32(__s0_241, vqrdmulhq_s32(__s1_241, splatq_laneq_s32(__s2_241, __p3_241))); \
+  __ret_241; \
 })
 #else
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_laneq_s32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
+  int32x4_t __s0_242 = __p0_242; \
+  int32x4_t __s1_242 = __p1_242; \
+  int32x4_t __s2_242 = __p2_242; \
+  int32x4_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \
+  int32x4_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 3, 2, 1, 0); \
+  int32x4_t __rev2_242;  __rev2_242 = __builtin_shufflevector(__s2_242, __s2_242, 3, 2, 1, 0); \
+  int32x4_t __ret_242; \
+  __ret_242 = __noswap_vqsubq_s32(__rev0_242, __noswap_vqrdmulhq_s32(__rev1_242, __noswap_splatq_laneq_s32(__rev2_242, __p3_242))); \
+  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \
+  __ret_242; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_laneq_s16(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
+  int16x8_t __s0_243 = __p0_243; \
+  int16x8_t __s1_243 = __p1_243; \
+  int16x8_t __s2_243 = __p2_243; \
+  int16x8_t __ret_243; \
+  __ret_243 = vqsubq_s16(__s0_243, vqrdmulhq_s16(__s1_243, splatq_laneq_s16(__s2_243, __p3_243))); \
+  __ret_243; \
 })
 #else
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_laneq_s16(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
+  int16x8_t __s0_244 = __p0_244; \
+  int16x8_t __s1_244 = __p1_244; \
+  int16x8_t __s2_244 = __p2_244; \
+  int16x8_t __rev0_244;  __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_244;  __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_244; \
+  __ret_244 = __noswap_vqsubq_s16(__rev0_244, __noswap_vqrdmulhq_s16(__rev1_244, __noswap_splatq_laneq_s16(__rev2_244, __p3_244))); \
+  __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_244; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
+  int32x2_t __s0_245 = __p0_245; \
+  int32x2_t __s1_245 = __p1_245; \
+  int32x4_t __s2_245 = __p2_245; \
+  int32x2_t __ret_245; \
+  __ret_245 = vqsub_s32(__s0_245, vqrdmulh_s32(__s1_245, splat_laneq_s32(__s2_245, __p3_245))); \
+  __ret_245; \
 })
 #else
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlsh_laneq_s32(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
+  int32x2_t __s0_246 = __p0_246; \
+  int32x2_t __s1_246 = __p1_246; \
+  int32x4_t __s2_246 = __p2_246; \
+  int32x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
+  int32x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
+  int32x4_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 3, 2, 1, 0); \
+  int32x2_t __ret_246; \
+  __ret_246 = __noswap_vqsub_s32(__rev0_246, __noswap_vqrdmulh_s32(__rev1_246, __noswap_splat_laneq_s32(__rev2_246, __p3_246))); \
+  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
+  __ret_246; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_laneq_s16(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
+  int16x4_t __s0_247 = __p0_247; \
+  int16x4_t __s1_247 = __p1_247; \
+  int16x8_t __s2_247 = __p2_247; \
+  int16x4_t __ret_247; \
+  __ret_247 = vqsub_s16(__s0_247, vqrdmulh_s16(__s1_247, splat_laneq_s16(__s2_247, __p3_247))); \
+  __ret_247; \
 })
 #else
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlsh_laneq_s16(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
+  int16x4_t __s0_248 = __p0_248; \
+  int16x4_t __s1_248 = __p1_248; \
+  int16x8_t __s2_248 = __p2_248; \
+  int16x4_t __rev0_248;  __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \
+  int16x4_t __rev1_248;  __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \
+  int16x8_t __rev2_248;  __rev2_248 = __builtin_shufflevector(__s2_248, __s2_248, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_248; \
+  __ret_248 = __noswap_vqsub_s16(__rev0_248, __noswap_vqrdmulh_s16(__rev1_248, __noswap_splat_laneq_s16(__rev2_248, __p3_248))); \
+  __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \
+  __ret_248; \
 })
 #endif
 
@@ -43582,892 +47382,892 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
-  poly8x16_t __s0_12 = __p0_12; \
-  poly8x8_t __s2_12 = __p2_12; \
-  poly8x16_t __ret_12; \
-  __ret_12 = vsetq_lane_p8(vget_lane_p8(__s2_12, __p3_12), __s0_12, __p1_12); \
-  __ret_12; \
+#define vcopyq_lane_p8(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
+  poly8x16_t __s0_249 = __p0_249; \
+  poly8x8_t __s2_249 = __p2_249; \
+  poly8x16_t __ret_249; \
+  __ret_249 = vsetq_lane_p8(vget_lane_p8(__s2_249, __p3_249), __s0_249, __p1_249); \
+  __ret_249; \
 })
 #else
-#define vcopyq_lane_p8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
-  poly8x16_t __s0_13 = __p0_13; \
-  poly8x8_t __s2_13 = __p2_13; \
-  poly8x16_t __rev0_13;  __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_13;  __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_13; \
-  __ret_13 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_13; \
+#define vcopyq_lane_p8(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
+  poly8x16_t __s0_250 = __p0_250; \
+  poly8x8_t __s2_250 = __p2_250; \
+  poly8x16_t __rev0_250;  __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_250;  __rev2_250 = __builtin_shufflevector(__s2_250, __s2_250, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_250; \
+  __ret_250 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_250, __p3_250), __rev0_250, __p1_250); \
+  __ret_250 = __builtin_shufflevector(__ret_250, __ret_250, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_250; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
-  poly16x8_t __s0_14 = __p0_14; \
-  poly16x4_t __s2_14 = __p2_14; \
-  poly16x8_t __ret_14; \
-  __ret_14 = vsetq_lane_p16(vget_lane_p16(__s2_14, __p3_14), __s0_14, __p1_14); \
-  __ret_14; \
+#define vcopyq_lane_p16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
+  poly16x8_t __s0_251 = __p0_251; \
+  poly16x4_t __s2_251 = __p2_251; \
+  poly16x8_t __ret_251; \
+  __ret_251 = vsetq_lane_p16(vget_lane_p16(__s2_251, __p3_251), __s0_251, __p1_251); \
+  __ret_251; \
 })
 #else
-#define vcopyq_lane_p16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
-  poly16x8_t __s0_15 = __p0_15; \
-  poly16x4_t __s2_15 = __p2_15; \
-  poly16x8_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_15;  __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 3, 2, 1, 0); \
-  poly16x8_t __ret_15; \
-  __ret_15 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_15, __p3_15), __rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
+#define vcopyq_lane_p16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
+  poly16x8_t __s0_252 = __p0_252; \
+  poly16x4_t __s2_252 = __p2_252; \
+  poly16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_252;  __rev2_252 = __builtin_shufflevector(__s2_252, __s2_252, 3, 2, 1, 0); \
+  poly16x8_t __ret_252; \
+  __ret_252 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_252, __p3_252), __rev0_252, __p1_252); \
+  __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_252; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
-  uint8x16_t __s0_16 = __p0_16; \
-  uint8x8_t __s2_16 = __p2_16; \
-  uint8x16_t __ret_16; \
-  __ret_16 = vsetq_lane_u8(vget_lane_u8(__s2_16, __p3_16), __s0_16, __p1_16); \
-  __ret_16; \
+#define vcopyq_lane_u8(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
+  uint8x16_t __s0_253 = __p0_253; \
+  uint8x8_t __s2_253 = __p2_253; \
+  uint8x16_t __ret_253; \
+  __ret_253 = vsetq_lane_u8(vget_lane_u8(__s2_253, __p3_253), __s0_253, __p1_253); \
+  __ret_253; \
 })
 #else
-#define vcopyq_lane_u8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
-  uint8x16_t __s0_17 = __p0_17; \
-  uint8x8_t __s2_17 = __p2_17; \
-  uint8x16_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_17;  __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_17; \
-  __ret_17 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
+#define vcopyq_lane_u8(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
+  uint8x16_t __s0_254 = __p0_254; \
+  uint8x8_t __s2_254 = __p2_254; \
+  uint8x16_t __rev0_254;  __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_254;  __rev2_254 = __builtin_shufflevector(__s2_254, __s2_254, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_254; \
+  __ret_254 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_254, __p3_254), __rev0_254, __p1_254); \
+  __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_254; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
-  uint32x4_t __s0_18 = __p0_18; \
-  uint32x2_t __s2_18 = __p2_18; \
-  uint32x4_t __ret_18; \
-  __ret_18 = vsetq_lane_u32(vget_lane_u32(__s2_18, __p3_18), __s0_18, __p1_18); \
-  __ret_18; \
+#define vcopyq_lane_u32(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
+  uint32x4_t __s0_255 = __p0_255; \
+  uint32x2_t __s2_255 = __p2_255; \
+  uint32x4_t __ret_255; \
+  __ret_255 = vsetq_lane_u32(vget_lane_u32(__s2_255, __p3_255), __s0_255, __p1_255); \
+  __ret_255; \
 })
 #else
-#define vcopyq_lane_u32(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
-  uint32x4_t __s0_19 = __p0_19; \
-  uint32x2_t __s2_19 = __p2_19; \
-  uint32x4_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 3, 2, 1, 0); \
-  uint32x2_t __rev2_19;  __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 1, 0); \
-  uint32x4_t __ret_19; \
-  __ret_19 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_19, __p3_19), __rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
+#define vcopyq_lane_u32(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
+  uint32x4_t __s0_256 = __p0_256; \
+  uint32x2_t __s2_256 = __p2_256; \
+  uint32x4_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \
+  uint32x2_t __rev2_256;  __rev2_256 = __builtin_shufflevector(__s2_256, __s2_256, 1, 0); \
+  uint32x4_t __ret_256; \
+  __ret_256 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_256, __p3_256), __rev0_256, __p1_256); \
+  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \
+  __ret_256; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
-  uint64x2_t __s0_20 = __p0_20; \
-  uint64x1_t __s2_20 = __p2_20; \
-  uint64x2_t __ret_20; \
-  __ret_20 = vsetq_lane_u64(vget_lane_u64(__s2_20, __p3_20), __s0_20, __p1_20); \
-  __ret_20; \
+#define vcopyq_lane_u64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
+  uint64x2_t __s0_257 = __p0_257; \
+  uint64x1_t __s2_257 = __p2_257; \
+  uint64x2_t __ret_257; \
+  __ret_257 = vsetq_lane_u64(vget_lane_u64(__s2_257, __p3_257), __s0_257, __p1_257); \
+  __ret_257; \
 })
 #else
-#define vcopyq_lane_u64(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
-  uint64x2_t __s0_21 = __p0_21; \
-  uint64x1_t __s2_21 = __p2_21; \
-  uint64x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  uint64x2_t __ret_21; \
-  __ret_21 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_21, __p3_21), __rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \
-  __ret_21; \
+#define vcopyq_lane_u64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
+  uint64x2_t __s0_258 = __p0_258; \
+  uint64x1_t __s2_258 = __p2_258; \
+  uint64x2_t __rev0_258;  __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 1, 0); \
+  uint64x2_t __ret_258; \
+  __ret_258 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_258, __p3_258), __rev0_258, __p1_258); \
+  __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 1, 0); \
+  __ret_258; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
-  uint16x8_t __s0_22 = __p0_22; \
-  uint16x4_t __s2_22 = __p2_22; \
-  uint16x8_t __ret_22; \
-  __ret_22 = vsetq_lane_u16(vget_lane_u16(__s2_22, __p3_22), __s0_22, __p1_22); \
-  __ret_22; \
+#define vcopyq_lane_u16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
+  uint16x8_t __s0_259 = __p0_259; \
+  uint16x4_t __s2_259 = __p2_259; \
+  uint16x8_t __ret_259; \
+  __ret_259 = vsetq_lane_u16(vget_lane_u16(__s2_259, __p3_259), __s0_259, __p1_259); \
+  __ret_259; \
 })
 #else
-#define vcopyq_lane_u16(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
-  uint16x8_t __s0_23 = __p0_23; \
-  uint16x4_t __s2_23 = __p2_23; \
-  uint16x8_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_23;  __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 3, 2, 1, 0); \
-  uint16x8_t __ret_23; \
-  __ret_23 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_23, __p3_23), __rev0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_23; \
+#define vcopyq_lane_u16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
+  uint16x8_t __s0_260 = __p0_260; \
+  uint16x4_t __s2_260 = __p2_260; \
+  uint16x8_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \
+  uint16x8_t __ret_260; \
+  __ret_260 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_260, __p3_260), __rev0_260, __p1_260); \
+  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_260; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
-  int8x16_t __s0_24 = __p0_24; \
-  int8x8_t __s2_24 = __p2_24; \
-  int8x16_t __ret_24; \
-  __ret_24 = vsetq_lane_s8(vget_lane_s8(__s2_24, __p3_24), __s0_24, __p1_24); \
-  __ret_24; \
+#define vcopyq_lane_s8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
+  int8x16_t __s0_261 = __p0_261; \
+  int8x8_t __s2_261 = __p2_261; \
+  int8x16_t __ret_261; \
+  __ret_261 = vsetq_lane_s8(vget_lane_s8(__s2_261, __p3_261), __s0_261, __p1_261); \
+  __ret_261; \
 })
 #else
-#define vcopyq_lane_s8(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
-  int8x16_t __s0_25 = __p0_25; \
-  int8x8_t __s2_25 = __p2_25; \
-  int8x16_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_25;  __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_25; \
-  __ret_25 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_25, __p3_25), __rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
+#define vcopyq_lane_s8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
+  int8x16_t __s0_262 = __p0_262; \
+  int8x8_t __s2_262 = __p2_262; \
+  int8x16_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_262; \
+  __ret_262 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_262, __p3_262), __rev0_262, __p1_262); \
+  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_262; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
-  float32x4_t __s0_26 = __p0_26; \
-  float32x2_t __s2_26 = __p2_26; \
-  float32x4_t __ret_26; \
-  __ret_26 = vsetq_lane_f32(vget_lane_f32(__s2_26, __p3_26), __s0_26, __p1_26); \
-  __ret_26; \
+#define vcopyq_lane_f32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
+  float32x4_t __s0_263 = __p0_263; \
+  float32x2_t __s2_263 = __p2_263; \
+  float32x4_t __ret_263; \
+  __ret_263 = vsetq_lane_f32(vget_lane_f32(__s2_263, __p3_263), __s0_263, __p1_263); \
+  __ret_263; \
 })
 #else
-#define vcopyq_lane_f32(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
-  float32x4_t __s0_27 = __p0_27; \
-  float32x2_t __s2_27 = __p2_27; \
-  float32x4_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
-  float32x2_t __rev2_27;  __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 1, 0); \
-  float32x4_t __ret_27; \
-  __ret_27 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_27, __p3_27), __rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \
-  __ret_27; \
+#define vcopyq_lane_f32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
+  float32x4_t __s0_264 = __p0_264; \
+  float32x2_t __s2_264 = __p2_264; \
+  float32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
+  float32x2_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \
+  float32x4_t __ret_264; \
+  __ret_264 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_264, __p3_264), __rev0_264, __p1_264); \
+  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
+  __ret_264; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
-  int32x4_t __s0_28 = __p0_28; \
-  int32x2_t __s2_28 = __p2_28; \
-  int32x4_t __ret_28; \
-  __ret_28 = vsetq_lane_s32(vget_lane_s32(__s2_28, __p3_28), __s0_28, __p1_28); \
-  __ret_28; \
+#define vcopyq_lane_s32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
+  int32x4_t __s0_265 = __p0_265; \
+  int32x2_t __s2_265 = __p2_265; \
+  int32x4_t __ret_265; \
+  __ret_265 = vsetq_lane_s32(vget_lane_s32(__s2_265, __p3_265), __s0_265, __p1_265); \
+  __ret_265; \
 })
 #else
-#define vcopyq_lane_s32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
-  int32x4_t __s0_29 = __p0_29; \
-  int32x2_t __s2_29 = __p2_29; \
-  int32x4_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \
-  int32x2_t __rev2_29;  __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
-  int32x4_t __ret_29; \
-  __ret_29 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 3, 2, 1, 0); \
-  __ret_29; \
+#define vcopyq_lane_s32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
+  int32x4_t __s0_266 = __p0_266; \
+  int32x2_t __s2_266 = __p2_266; \
+  int32x4_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 3, 2, 1, 0); \
+  int32x2_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 1, 0); \
+  int32x4_t __ret_266; \
+  __ret_266 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_266, __p3_266), __rev0_266, __p1_266); \
+  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 3, 2, 1, 0); \
+  __ret_266; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
-  int64x2_t __s0_30 = __p0_30; \
-  int64x1_t __s2_30 = __p2_30; \
-  int64x2_t __ret_30; \
-  __ret_30 = vsetq_lane_s64(vget_lane_s64(__s2_30, __p3_30), __s0_30, __p1_30); \
-  __ret_30; \
+#define vcopyq_lane_s64(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
+  int64x2_t __s0_267 = __p0_267; \
+  int64x1_t __s2_267 = __p2_267; \
+  int64x2_t __ret_267; \
+  __ret_267 = vsetq_lane_s64(vget_lane_s64(__s2_267, __p3_267), __s0_267, __p1_267); \
+  __ret_267; \
 })
 #else
-#define vcopyq_lane_s64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
-  int64x2_t __s0_31 = __p0_31; \
-  int64x1_t __s2_31 = __p2_31; \
-  int64x2_t __rev0_31;  __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
-  int64x2_t __ret_31; \
-  __ret_31 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_31, __p3_31), __rev0_31, __p1_31); \
-  __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
-  __ret_31; \
+#define vcopyq_lane_s64(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
+  int64x2_t __s0_268 = __p0_268; \
+  int64x1_t __s2_268 = __p2_268; \
+  int64x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
+  int64x2_t __ret_268; \
+  __ret_268 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_268, __p3_268), __rev0_268, __p1_268); \
+  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
+  __ret_268; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
-  int16x8_t __s0_32 = __p0_32; \
-  int16x4_t __s2_32 = __p2_32; \
-  int16x8_t __ret_32; \
-  __ret_32 = vsetq_lane_s16(vget_lane_s16(__s2_32, __p3_32), __s0_32, __p1_32); \
-  __ret_32; \
+#define vcopyq_lane_s16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
+  int16x8_t __s0_269 = __p0_269; \
+  int16x4_t __s2_269 = __p2_269; \
+  int16x8_t __ret_269; \
+  __ret_269 = vsetq_lane_s16(vget_lane_s16(__s2_269, __p3_269), __s0_269, __p1_269); \
+  __ret_269; \
 })
 #else
-#define vcopyq_lane_s16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
-  int16x8_t __s0_33 = __p0_33; \
-  int16x4_t __s2_33 = __p2_33; \
-  int16x8_t __rev0_33;  __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_33;  __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
-  int16x8_t __ret_33; \
-  __ret_33 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
-  __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_33; \
+#define vcopyq_lane_s16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
+  int16x8_t __s0_270 = __p0_270; \
+  int16x4_t __s2_270 = __p2_270; \
+  int16x8_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 3, 2, 1, 0); \
+  int16x8_t __ret_270; \
+  __ret_270 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_270, __p3_270), __rev0_270, __p1_270); \
+  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_270; \
 })
 #endif
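
The vcopy_lane_* forms below perform the same lane copy with a 64-bit destination as well as a 64-bit source. Illustrative sketch (assumes an AArch64 target):

#include <arm_neon.h>

uint8x8_t copy_d_from_d(uint8x8_t dst, uint8x8_t src) {
  return vcopy_lane_u8(dst, 7, src, 0);  /* dst[7] = src[0] */
}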
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
-  poly8x8_t __s0_34 = __p0_34; \
-  poly8x8_t __s2_34 = __p2_34; \
-  poly8x8_t __ret_34; \
-  __ret_34 = vset_lane_p8(vget_lane_p8(__s2_34, __p3_34), __s0_34, __p1_34); \
-  __ret_34; \
+#define vcopy_lane_p8(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
+  poly8x8_t __s0_271 = __p0_271; \
+  poly8x8_t __s2_271 = __p2_271; \
+  poly8x8_t __ret_271; \
+  __ret_271 = vset_lane_p8(vget_lane_p8(__s2_271, __p3_271), __s0_271, __p1_271); \
+  __ret_271; \
 })
 #else
-#define vcopy_lane_p8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
-  poly8x8_t __s0_35 = __p0_35; \
-  poly8x8_t __s2_35 = __p2_35; \
-  poly8x8_t __rev0_35;  __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_35;  __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_35; \
-  __ret_35 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
-  __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_35; \
+#define vcopy_lane_p8(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
+  poly8x8_t __s0_272 = __p0_272; \
+  poly8x8_t __s2_272 = __p2_272; \
+  poly8x8_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_272; \
+  __ret_272 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_272, __p3_272), __rev0_272, __p1_272); \
+  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_272; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
-  poly16x4_t __s0_36 = __p0_36; \
-  poly16x4_t __s2_36 = __p2_36; \
-  poly16x4_t __ret_36; \
-  __ret_36 = vset_lane_p16(vget_lane_p16(__s2_36, __p3_36), __s0_36, __p1_36); \
-  __ret_36; \
+#define vcopy_lane_p16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
+  poly16x4_t __s0_273 = __p0_273; \
+  poly16x4_t __s2_273 = __p2_273; \
+  poly16x4_t __ret_273; \
+  __ret_273 = vset_lane_p16(vget_lane_p16(__s2_273, __p3_273), __s0_273, __p1_273); \
+  __ret_273; \
 })
 #else
-#define vcopy_lane_p16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
-  poly16x4_t __s0_37 = __p0_37; \
-  poly16x4_t __s2_37 = __p2_37; \
-  poly16x4_t __rev0_37;  __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 3, 2, 1, 0); \
-  poly16x4_t __rev2_37;  __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
-  poly16x4_t __ret_37; \
-  __ret_37 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
-  __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 3, 2, 1, 0); \
-  __ret_37; \
+#define vcopy_lane_p16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
+  poly16x4_t __s0_274 = __p0_274; \
+  poly16x4_t __s2_274 = __p2_274; \
+  poly16x4_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \
+  poly16x4_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 3, 2, 1, 0); \
+  poly16x4_t __ret_274; \
+  __ret_274 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_274, __p3_274), __rev0_274, __p1_274); \
+  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \
+  __ret_274; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
-  uint8x8_t __s0_38 = __p0_38; \
-  uint8x8_t __s2_38 = __p2_38; \
-  uint8x8_t __ret_38; \
-  __ret_38 = vset_lane_u8(vget_lane_u8(__s2_38, __p3_38), __s0_38, __p1_38); \
-  __ret_38; \
+#define vcopy_lane_u8(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
+  uint8x8_t __s0_275 = __p0_275; \
+  uint8x8_t __s2_275 = __p2_275; \
+  uint8x8_t __ret_275; \
+  __ret_275 = vset_lane_u8(vget_lane_u8(__s2_275, __p3_275), __s0_275, __p1_275); \
+  __ret_275; \
 })
 #else
-#define vcopy_lane_u8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
-  uint8x8_t __s0_39 = __p0_39; \
-  uint8x8_t __s2_39 = __p2_39; \
-  uint8x8_t __rev0_39;  __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_39;  __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_39; \
-  __ret_39 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
-  __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_39; \
+#define vcopy_lane_u8(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
+  uint8x8_t __s0_276 = __p0_276; \
+  uint8x8_t __s2_276 = __p2_276; \
+  uint8x8_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_276; \
+  __ret_276 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_276, __p3_276), __rev0_276, __p1_276); \
+  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_276; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
-  uint32x2_t __s0_40 = __p0_40; \
-  uint32x2_t __s2_40 = __p2_40; \
-  uint32x2_t __ret_40; \
-  __ret_40 = vset_lane_u32(vget_lane_u32(__s2_40, __p3_40), __s0_40, __p1_40); \
-  __ret_40; \
+#define vcopy_lane_u32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
+  uint32x2_t __s0_277 = __p0_277; \
+  uint32x2_t __s2_277 = __p2_277; \
+  uint32x2_t __ret_277; \
+  __ret_277 = vset_lane_u32(vget_lane_u32(__s2_277, __p3_277), __s0_277, __p1_277); \
+  __ret_277; \
 })
 #else
-#define vcopy_lane_u32(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
-  uint32x2_t __s0_41 = __p0_41; \
-  uint32x2_t __s2_41 = __p2_41; \
-  uint32x2_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 1, 0); \
-  uint32x2_t __rev2_41;  __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 1, 0); \
-  uint32x2_t __ret_41; \
-  __ret_41 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_41, __p3_41), __rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 1, 0); \
-  __ret_41; \
+#define vcopy_lane_u32(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
+  uint32x2_t __s0_278 = __p0_278; \
+  uint32x2_t __s2_278 = __p2_278; \
+  uint32x2_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 1, 0); \
+  uint32x2_t __rev2_278;  __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 1, 0); \
+  uint32x2_t __ret_278; \
+  __ret_278 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_278, __p3_278), __rev0_278, __p1_278); \
+  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 1, 0); \
+  __ret_278; \
 })
 #endif
 
-#define vcopy_lane_u64(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint64x1_t __s0_42 = __p0_42; \
-  uint64x1_t __s2_42 = __p2_42; \
-  uint64x1_t __ret_42; \
-  __ret_42 = vset_lane_u64(vget_lane_u64(__s2_42, __p3_42), __s0_42, __p1_42); \
-  __ret_42; \
+#define vcopy_lane_u64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
+  uint64x1_t __s0_279 = __p0_279; \
+  uint64x1_t __s2_279 = __p2_279; \
+  uint64x1_t __ret_279; \
+  __ret_279 = vset_lane_u64(vget_lane_u64(__s2_279, __p3_279), __s0_279, __p1_279); \
+  __ret_279; \
 })
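
vcopy_lane_u64 above (and vcopy_lane_s64 further down) is emitted once, outside the __LITTLE_ENDIAN__ split: a one-lane uint64x1_t has no lane order to reverse, so both byte orders share the same body. Sketch (assumes an AArch64 target):

#include <arm_neon.h>

uint64x1_t copy_u64(uint64x1_t dst, uint64x1_t src) {
  return vcopy_lane_u64(dst, 0, src, 0);  /* dst[0] = src[0] */
}
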
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint16x4_t __s0_43 = __p0_43; \
-  uint16x4_t __s2_43 = __p2_43; \
-  uint16x4_t __ret_43; \
-  __ret_43 = vset_lane_u16(vget_lane_u16(__s2_43, __p3_43), __s0_43, __p1_43); \
-  __ret_43; \
+#define vcopy_lane_u16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
+  uint16x4_t __s0_280 = __p0_280; \
+  uint16x4_t __s2_280 = __p2_280; \
+  uint16x4_t __ret_280; \
+  __ret_280 = vset_lane_u16(vget_lane_u16(__s2_280, __p3_280), __s0_280, __p1_280); \
+  __ret_280; \
 })
 #else
-#define vcopy_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x4_t __s0_44 = __p0_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x4_t __rev0_44;  __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 3, 2, 1, 0); \
-  uint16x4_t __rev2_44;  __rev2_44 = __builtin_shufflevector(__s2_44, __s2_44, 3, 2, 1, 0); \
-  uint16x4_t __ret_44; \
-  __ret_44 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_44, __p3_44), __rev0_44, __p1_44); \
-  __ret_44 = __builtin_shufflevector(__ret_44, __ret_44, 3, 2, 1, 0); \
-  __ret_44; \
+#define vcopy_lane_u16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
+  uint16x4_t __s0_281 = __p0_281; \
+  uint16x4_t __s2_281 = __p2_281; \
+  uint16x4_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 3, 2, 1, 0); \
+  uint16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
+  uint16x4_t __ret_281; \
+  __ret_281 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_281, __p3_281), __rev0_281, __p1_281); \
+  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 3, 2, 1, 0); \
+  __ret_281; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  int8x8_t __s0_45 = __p0_45; \
-  int8x8_t __s2_45 = __p2_45; \
-  int8x8_t __ret_45; \
-  __ret_45 = vset_lane_s8(vget_lane_s8(__s2_45, __p3_45), __s0_45, __p1_45); \
-  __ret_45; \
+#define vcopy_lane_s8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
+  int8x8_t __s0_282 = __p0_282; \
+  int8x8_t __s2_282 = __p2_282; \
+  int8x8_t __ret_282; \
+  __ret_282 = vset_lane_s8(vget_lane_s8(__s2_282, __p3_282), __s0_282, __p1_282); \
+  __ret_282; \
 })
 #else
-#define vcopy_lane_s8(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  int8x8_t __s0_46 = __p0_46; \
-  int8x8_t __s2_46 = __p2_46; \
-  int8x8_t __rev0_46;  __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_46;  __rev2_46 = __builtin_shufflevector(__s2_46, __s2_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_46; \
-  __ret_46 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_46, __p3_46), __rev0_46, __p1_46); \
-  __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_46; \
+#define vcopy_lane_s8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
+  int8x8_t __s0_283 = __p0_283; \
+  int8x8_t __s2_283 = __p2_283; \
+  int8x8_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_283; \
+  __ret_283 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_283, __p3_283), __rev0_283, __p1_283); \
+  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_283; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x2_t __s0_47 = __p0_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x2_t __ret_47; \
-  __ret_47 = vset_lane_f32(vget_lane_f32(__s2_47, __p3_47), __s0_47, __p1_47); \
-  __ret_47; \
+#define vcopy_lane_f32(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
+  float32x2_t __s0_284 = __p0_284; \
+  float32x2_t __s2_284 = __p2_284; \
+  float32x2_t __ret_284; \
+  __ret_284 = vset_lane_f32(vget_lane_f32(__s2_284, __p3_284), __s0_284, __p1_284); \
+  __ret_284; \
 })
 #else
-#define vcopy_lane_f32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  float32x2_t __s0_48 = __p0_48; \
-  float32x2_t __s2_48 = __p2_48; \
-  float32x2_t __rev0_48;  __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 1, 0); \
-  float32x2_t __rev2_48;  __rev2_48 = __builtin_shufflevector(__s2_48, __s2_48, 1, 0); \
-  float32x2_t __ret_48; \
-  __ret_48 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_48, __p3_48), __rev0_48, __p1_48); \
-  __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 1, 0); \
-  __ret_48; \
+#define vcopy_lane_f32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
+  float32x2_t __s0_285 = __p0_285; \
+  float32x2_t __s2_285 = __p2_285; \
+  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
+  float32x2_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 1, 0); \
+  float32x2_t __ret_285; \
+  __ret_285 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_285, __p3_285), __rev0_285, __p1_285); \
+  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
+  __ret_285; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x2_t __s0_49 = __p0_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x2_t __ret_49; \
-  __ret_49 = vset_lane_s32(vget_lane_s32(__s2_49, __p3_49), __s0_49, __p1_49); \
-  __ret_49; \
+#define vcopy_lane_s32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
+  int32x2_t __s0_286 = __p0_286; \
+  int32x2_t __s2_286 = __p2_286; \
+  int32x2_t __ret_286; \
+  __ret_286 = vset_lane_s32(vget_lane_s32(__s2_286, __p3_286), __s0_286, __p1_286); \
+  __ret_286; \
 })
 #else
-#define vcopy_lane_s32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int32x2_t __s0_50 = __p0_50; \
-  int32x2_t __s2_50 = __p2_50; \
-  int32x2_t __rev0_50;  __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \
-  int32x2_t __rev2_50;  __rev2_50 = __builtin_shufflevector(__s2_50, __s2_50, 1, 0); \
-  int32x2_t __ret_50; \
-  __ret_50 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_50, __p3_50), __rev0_50, __p1_50); \
-  __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \
-  __ret_50; \
+#define vcopy_lane_s32(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
+  int32x2_t __s0_287 = __p0_287; \
+  int32x2_t __s2_287 = __p2_287; \
+  int32x2_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \
+  int32x2_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 1, 0); \
+  int32x2_t __ret_287; \
+  __ret_287 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_287, __p3_287), __rev0_287, __p1_287); \
+  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \
+  __ret_287; \
 })
 #endif
 
-#define vcopy_lane_s64(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int64x1_t __s0_51 = __p0_51; \
-  int64x1_t __s2_51 = __p2_51; \
-  int64x1_t __ret_51; \
-  __ret_51 = vset_lane_s64(vget_lane_s64(__s2_51, __p3_51), __s0_51, __p1_51); \
-  __ret_51; \
+#define vcopy_lane_s64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
+  int64x1_t __s0_288 = __p0_288; \
+  int64x1_t __s2_288 = __p2_288; \
+  int64x1_t __ret_288; \
+  __ret_288 = vset_lane_s64(vget_lane_s64(__s2_288, __p3_288), __s0_288, __p1_288); \
+  __ret_288; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  int16x4_t __s0_52 = __p0_52; \
-  int16x4_t __s2_52 = __p2_52; \
-  int16x4_t __ret_52; \
-  __ret_52 = vset_lane_s16(vget_lane_s16(__s2_52, __p3_52), __s0_52, __p1_52); \
-  __ret_52; \
+#define vcopy_lane_s16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
+  int16x4_t __s0_289 = __p0_289; \
+  int16x4_t __s2_289 = __p2_289; \
+  int16x4_t __ret_289; \
+  __ret_289 = vset_lane_s16(vget_lane_s16(__s2_289, __p3_289), __s0_289, __p1_289); \
+  __ret_289; \
 })
 #else
-#define vcopy_lane_s16(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  int16x4_t __s0_53 = __p0_53; \
-  int16x4_t __s2_53 = __p2_53; \
-  int16x4_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \
-  int16x4_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 3, 2, 1, 0); \
-  int16x4_t __ret_53; \
-  __ret_53 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_53, __p3_53), __rev0_53, __p1_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \
-  __ret_53; \
+#define vcopy_lane_s16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
+  int16x4_t __s0_290 = __p0_290; \
+  int16x4_t __s2_290 = __p2_290; \
+  int16x4_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 3, 2, 1, 0); \
+  int16x4_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 3, 2, 1, 0); \
+  int16x4_t __ret_290; \
+  __ret_290 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_290, __p3_290), __rev0_290, __p1_290); \
+  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 3, 2, 1, 0); \
+  __ret_290; \
 })
 #endif
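
In the vcopyq_laneq_* forms that follow, both destination and source are 128-bit vectors ("q" result, "laneq" source). Sketch (assumes an AArch64 target):

#include <arm_neon.h>

poly8x16_t copy_q_from_q(poly8x16_t dst, poly8x16_t src) {
  return vcopyq_laneq_p8(dst, 15, src, 0);  /* dst[15] = src[0] */
}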
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  poly8x16_t __s0_54 = __p0_54; \
-  poly8x16_t __s2_54 = __p2_54; \
-  poly8x16_t __ret_54; \
-  __ret_54 = vsetq_lane_p8(vgetq_lane_p8(__s2_54, __p3_54), __s0_54, __p1_54); \
-  __ret_54; \
+#define vcopyq_laneq_p8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
+  poly8x16_t __s0_291 = __p0_291; \
+  poly8x16_t __s2_291 = __p2_291; \
+  poly8x16_t __ret_291; \
+  __ret_291 = vsetq_lane_p8(vgetq_lane_p8(__s2_291, __p3_291), __s0_291, __p1_291); \
+  __ret_291; \
 })
 #else
-#define vcopyq_laneq_p8(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  poly8x16_t __s0_55 = __p0_55; \
-  poly8x16_t __s2_55 = __p2_55; \
-  poly8x16_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_55; \
-  __ret_55 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_55, __p3_55), __rev0_55, __p1_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_55; \
+#define vcopyq_laneq_p8(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
+  poly8x16_t __s0_292 = __p0_292; \
+  poly8x16_t __s2_292 = __p2_292; \
+  poly8x16_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_292; \
+  __ret_292 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_292, __p3_292), __rev0_292, __p1_292); \
+  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_292; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  poly16x8_t __s0_56 = __p0_56; \
-  poly16x8_t __s2_56 = __p2_56; \
-  poly16x8_t __ret_56; \
-  __ret_56 = vsetq_lane_p16(vgetq_lane_p16(__s2_56, __p3_56), __s0_56, __p1_56); \
-  __ret_56; \
+#define vcopyq_laneq_p16(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
+  poly16x8_t __s0_293 = __p0_293; \
+  poly16x8_t __s2_293 = __p2_293; \
+  poly16x8_t __ret_293; \
+  __ret_293 = vsetq_lane_p16(vgetq_lane_p16(__s2_293, __p3_293), __s0_293, __p1_293); \
+  __ret_293; \
 })
 #else
-#define vcopyq_laneq_p16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  poly16x8_t __s0_57 = __p0_57; \
-  poly16x8_t __s2_57 = __p2_57; \
-  poly16x8_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_57; \
-  __ret_57 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_57, __p3_57), __rev0_57, __p1_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_57; \
+#define vcopyq_laneq_p16(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
+  poly16x8_t __s0_294 = __p0_294; \
+  poly16x8_t __s2_294 = __p2_294; \
+  poly16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_294;  __rev2_294 = __builtin_shufflevector(__s2_294, __s2_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_294; \
+  __ret_294 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_294, __p3_294), __rev0_294, __p1_294); \
+  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_294; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  uint8x16_t __s0_58 = __p0_58; \
-  uint8x16_t __s2_58 = __p2_58; \
-  uint8x16_t __ret_58; \
-  __ret_58 = vsetq_lane_u8(vgetq_lane_u8(__s2_58, __p3_58), __s0_58, __p1_58); \
-  __ret_58; \
+#define vcopyq_laneq_u8(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
+  uint8x16_t __s0_295 = __p0_295; \
+  uint8x16_t __s2_295 = __p2_295; \
+  uint8x16_t __ret_295; \
+  __ret_295 = vsetq_lane_u8(vgetq_lane_u8(__s2_295, __p3_295), __s0_295, __p1_295); \
+  __ret_295; \
 })
 #else
-#define vcopyq_laneq_u8(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  uint8x16_t __s0_59 = __p0_59; \
-  uint8x16_t __s2_59 = __p2_59; \
-  uint8x16_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_59; \
-  __ret_59 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_59, __p3_59), __rev0_59, __p1_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_59; \
+#define vcopyq_laneq_u8(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
+  uint8x16_t __s0_296 = __p0_296; \
+  uint8x16_t __s2_296 = __p2_296; \
+  uint8x16_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_296;  __rev2_296 = __builtin_shufflevector(__s2_296, __s2_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_296; \
+  __ret_296 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_296, __p3_296), __rev0_296, __p1_296); \
+  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_296; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  uint32x4_t __s0_60 = __p0_60; \
-  uint32x4_t __s2_60 = __p2_60; \
-  uint32x4_t __ret_60; \
-  __ret_60 = vsetq_lane_u32(vgetq_lane_u32(__s2_60, __p3_60), __s0_60, __p1_60); \
-  __ret_60; \
+#define vcopyq_laneq_u32(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
+  uint32x4_t __s0_297 = __p0_297; \
+  uint32x4_t __s2_297 = __p2_297; \
+  uint32x4_t __ret_297; \
+  __ret_297 = vsetq_lane_u32(vgetq_lane_u32(__s2_297, __p3_297), __s0_297, __p1_297); \
+  __ret_297; \
 })
 #else
-#define vcopyq_laneq_u32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  uint32x4_t __s0_61 = __p0_61; \
-  uint32x4_t __s2_61 = __p2_61; \
-  uint32x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  uint32x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  uint32x4_t __ret_61; \
-  __ret_61 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_61, __p3_61), __rev0_61, __p1_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
+#define vcopyq_laneq_u32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
+  uint32x4_t __s0_298 = __p0_298; \
+  uint32x4_t __s2_298 = __p2_298; \
+  uint32x4_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 3, 2, 1, 0); \
+  uint32x4_t __rev2_298;  __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \
+  uint32x4_t __ret_298; \
+  __ret_298 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_298, __p3_298), __rev0_298, __p1_298); \
+  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \
+  __ret_298; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint64x2_t __s0_62 = __p0_62; \
-  uint64x2_t __s2_62 = __p2_62; \
-  uint64x2_t __ret_62; \
-  __ret_62 = vsetq_lane_u64(vgetq_lane_u64(__s2_62, __p3_62), __s0_62, __p1_62); \
-  __ret_62; \
+#define vcopyq_laneq_u64(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
+  uint64x2_t __s0_299 = __p0_299; \
+  uint64x2_t __s2_299 = __p2_299; \
+  uint64x2_t __ret_299; \
+  __ret_299 = vsetq_lane_u64(vgetq_lane_u64(__s2_299, __p3_299), __s0_299, __p1_299); \
+  __ret_299; \
 })
 #else
-#define vcopyq_laneq_u64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint64x2_t __s0_63 = __p0_63; \
-  uint64x2_t __s2_63 = __p2_63; \
-  uint64x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
-  uint64x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint64x2_t __ret_63; \
-  __ret_63 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_63, __p3_63), __rev0_63, __p1_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
-  __ret_63; \
+#define vcopyq_laneq_u64(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
+  uint64x2_t __s0_300 = __p0_300; \
+  uint64x2_t __s2_300 = __p2_300; \
+  uint64x2_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 1, 0); \
+  uint64x2_t __rev2_300;  __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 1, 0); \
+  uint64x2_t __ret_300; \
+  __ret_300 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_300, __p3_300), __rev0_300, __p1_300); \
+  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 1, 0); \
+  __ret_300; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = vsetq_lane_u16(vgetq_lane_u16(__s2_64, __p3_64), __s0_64, __p1_64); \
-  __ret_64; \
+#define vcopyq_laneq_u16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
+  uint16x8_t __s0_301 = __p0_301; \
+  uint16x8_t __s2_301 = __p2_301; \
+  uint16x8_t __ret_301; \
+  __ret_301 = vsetq_lane_u16(vgetq_lane_u16(__s2_301, __p3_301), __s0_301, __p1_301); \
+  __ret_301; \
 })
 #else
-#define vcopyq_laneq_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_65, __p3_65), __rev0_65, __p1_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
+#define vcopyq_laneq_u16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
+  uint16x8_t __s0_302 = __p0_302; \
+  uint16x8_t __s2_302 = __p2_302; \
+  uint16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_302;  __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_302; \
+  __ret_302 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_302, __p3_302), __rev0_302, __p1_302); \
+  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_302; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  int8x16_t __s0_66 = __p0_66; \
-  int8x16_t __s2_66 = __p2_66; \
-  int8x16_t __ret_66; \
-  __ret_66 = vsetq_lane_s8(vgetq_lane_s8(__s2_66, __p3_66), __s0_66, __p1_66); \
-  __ret_66; \
+#define vcopyq_laneq_s8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
+  int8x16_t __s0_303 = __p0_303; \
+  int8x16_t __s2_303 = __p2_303; \
+  int8x16_t __ret_303; \
+  __ret_303 = vsetq_lane_s8(vgetq_lane_s8(__s2_303, __p3_303), __s0_303, __p1_303); \
+  __ret_303; \
 })
 #else
-#define vcopyq_laneq_s8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  int8x16_t __s0_67 = __p0_67; \
-  int8x16_t __s2_67 = __p2_67; \
-  int8x16_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_67; \
-  __ret_67 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_67, __p3_67), __rev0_67, __p1_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_67; \
+#define vcopyq_laneq_s8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
+  int8x16_t __s0_304 = __p0_304; \
+  int8x16_t __s2_304 = __p2_304; \
+  int8x16_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_304;  __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_304; \
+  __ret_304 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_304, __p3_304), __rev0_304, __p1_304); \
+  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_304; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  float32x4_t __s0_68 = __p0_68; \
-  float32x4_t __s2_68 = __p2_68; \
-  float32x4_t __ret_68; \
-  __ret_68 = vsetq_lane_f32(vgetq_lane_f32(__s2_68, __p3_68), __s0_68, __p1_68); \
-  __ret_68; \
+#define vcopyq_laneq_f32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
+  float32x4_t __s0_305 = __p0_305; \
+  float32x4_t __s2_305 = __p2_305; \
+  float32x4_t __ret_305; \
+  __ret_305 = vsetq_lane_f32(vgetq_lane_f32(__s2_305, __p3_305), __s0_305, __p1_305); \
+  __ret_305; \
 })
 #else
-#define vcopyq_laneq_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  float32x4_t __s0_69 = __p0_69; \
-  float32x4_t __s2_69 = __p2_69; \
-  float32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  float32x4_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \
-  float32x4_t __ret_69; \
-  __ret_69 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_69, __p3_69), __rev0_69, __p1_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
+#define vcopyq_laneq_f32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
+  float32x4_t __s0_306 = __p0_306; \
+  float32x4_t __s2_306 = __p2_306; \
+  float32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
+  float32x4_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \
+  float32x4_t __ret_306; \
+  __ret_306 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_306, __p3_306), __rev0_306, __p1_306); \
+  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
+  __ret_306; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int32x4_t __s0_70 = __p0_70; \
-  int32x4_t __s2_70 = __p2_70; \
-  int32x4_t __ret_70; \
-  __ret_70 = vsetq_lane_s32(vgetq_lane_s32(__s2_70, __p3_70), __s0_70, __p1_70); \
-  __ret_70; \
+#define vcopyq_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
+  int32x4_t __s0_307 = __p0_307; \
+  int32x4_t __s2_307 = __p2_307; \
+  int32x4_t __ret_307; \
+  __ret_307 = vsetq_lane_s32(vgetq_lane_s32(__s2_307, __p3_307), __s0_307, __p1_307); \
+  __ret_307; \
 })
 #else
-#define vcopyq_laneq_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int32x4_t __s0_71 = __p0_71; \
-  int32x4_t __s2_71 = __p2_71; \
-  int32x4_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
-  int32x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int32x4_t __ret_71; \
-  __ret_71 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_71, __p3_71), __rev0_71, __p1_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
-  __ret_71; \
+#define vcopyq_laneq_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
+  int32x4_t __s0_308 = __p0_308; \
+  int32x4_t __s2_308 = __p2_308; \
+  int32x4_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 3, 2, 1, 0); \
+  int32x4_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 3, 2, 1, 0); \
+  int32x4_t __ret_308; \
+  __ret_308 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_308, __p3_308), __rev0_308, __p1_308); \
+  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 3, 2, 1, 0); \
+  __ret_308; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  int64x2_t __s0_72 = __p0_72; \
-  int64x2_t __s2_72 = __p2_72; \
-  int64x2_t __ret_72; \
-  __ret_72 = vsetq_lane_s64(vgetq_lane_s64(__s2_72, __p3_72), __s0_72, __p1_72); \
-  __ret_72; \
+#define vcopyq_laneq_s64(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
+  int64x2_t __s0_309 = __p0_309; \
+  int64x2_t __s2_309 = __p2_309; \
+  int64x2_t __ret_309; \
+  __ret_309 = vsetq_lane_s64(vgetq_lane_s64(__s2_309, __p3_309), __s0_309, __p1_309); \
+  __ret_309; \
 })
 #else
-#define vcopyq_laneq_s64(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  int64x2_t __s0_73 = __p0_73; \
-  int64x2_t __s2_73 = __p2_73; \
-  int64x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  int64x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  int64x2_t __ret_73; \
-  __ret_73 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_73, __p3_73), __rev0_73, __p1_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
+#define vcopyq_laneq_s64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
+  int64x2_t __s0_310 = __p0_310; \
+  int64x2_t __s2_310 = __p2_310; \
+  int64x2_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 1, 0); \
+  int64x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
+  int64x2_t __ret_310; \
+  __ret_310 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_310, __p3_310), __rev0_310, __p1_310); \
+  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 1, 0); \
+  __ret_310; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  int16x8_t __s0_74 = __p0_74; \
-  int16x8_t __s2_74 = __p2_74; \
-  int16x8_t __ret_74; \
-  __ret_74 = vsetq_lane_s16(vgetq_lane_s16(__s2_74, __p3_74), __s0_74, __p1_74); \
-  __ret_74; \
+#define vcopyq_laneq_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
+  int16x8_t __s0_311 = __p0_311; \
+  int16x8_t __s2_311 = __p2_311; \
+  int16x8_t __ret_311; \
+  __ret_311 = vsetq_lane_s16(vgetq_lane_s16(__s2_311, __p3_311), __s0_311, __p1_311); \
+  __ret_311; \
 })
 #else
-#define vcopyq_laneq_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  int16x8_t __s0_75 = __p0_75; \
-  int16x8_t __s2_75 = __p2_75; \
-  int16x8_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_75; \
-  __ret_75 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_75, __p3_75), __rev0_75, __p1_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_75; \
+#define vcopyq_laneq_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
+  int16x8_t __s0_312 = __p0_312; \
+  int16x8_t __s2_312 = __p2_312; \
+  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_312; \
+  __ret_312 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_312, __p3_312), __rev0_312, __p1_312); \
+  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_312; \
 })
 #endif
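
The vcopy_laneq_* forms mix widths: a 64-bit destination takes a lane from a 128-bit source, so the big-endian branch reverses each operand with its own lane count (for example 8 lanes for poly8x8_t versus 16 for poly8x16_t). Sketch (assumes an AArch64 target):

#include <arm_neon.h>

float32x2_t copy_d_from_q(float32x2_t dst, float32x4_t src) {
  return vcopy_laneq_f32(dst, 1, src, 3);  /* dst[1] = src[3] */
}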
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  poly8x8_t __s0_76 = __p0_76; \
-  poly8x16_t __s2_76 = __p2_76; \
-  poly8x8_t __ret_76; \
-  __ret_76 = vset_lane_p8(vgetq_lane_p8(__s2_76, __p3_76), __s0_76, __p1_76); \
-  __ret_76; \
+#define vcopy_laneq_p8(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
+  poly8x8_t __s0_313 = __p0_313; \
+  poly8x16_t __s2_313 = __p2_313; \
+  poly8x8_t __ret_313; \
+  __ret_313 = vset_lane_p8(vgetq_lane_p8(__s2_313, __p3_313), __s0_313, __p1_313); \
+  __ret_313; \
 })
 #else
-#define vcopy_laneq_p8(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  poly8x8_t __s0_77 = __p0_77; \
-  poly8x16_t __s2_77 = __p2_77; \
-  poly8x8_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_77; \
-  __ret_77 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_77, __p3_77), __rev0_77, __p1_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_77; \
+#define vcopy_laneq_p8(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
+  poly8x8_t __s0_314 = __p0_314; \
+  poly8x16_t __s2_314 = __p2_314; \
+  poly8x8_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_314; \
+  __ret_314 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_314, __p3_314), __rev0_314, __p1_314); \
+  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_314; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  poly16x4_t __s0_78 = __p0_78; \
-  poly16x8_t __s2_78 = __p2_78; \
-  poly16x4_t __ret_78; \
-  __ret_78 = vset_lane_p16(vgetq_lane_p16(__s2_78, __p3_78), __s0_78, __p1_78); \
-  __ret_78; \
+#define vcopy_laneq_p16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
+  poly16x4_t __s0_315 = __p0_315; \
+  poly16x8_t __s2_315 = __p2_315; \
+  poly16x4_t __ret_315; \
+  __ret_315 = vset_lane_p16(vgetq_lane_p16(__s2_315, __p3_315), __s0_315, __p1_315); \
+  __ret_315; \
 })
 #else
-#define vcopy_laneq_p16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  poly16x4_t __s0_79 = __p0_79; \
-  poly16x8_t __s2_79 = __p2_79; \
-  poly16x4_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \
-  poly16x8_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_79; \
-  __ret_79 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_79, __p3_79), __rev0_79, __p1_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \
-  __ret_79; \
+#define vcopy_laneq_p16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
+  poly16x4_t __s0_316 = __p0_316; \
+  poly16x8_t __s2_316 = __p2_316; \
+  poly16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
+  poly16x8_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_316; \
+  __ret_316 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_316, __p3_316), __rev0_316, __p1_316); \
+  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
+  __ret_316; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  uint8x8_t __s0_80 = __p0_80; \
-  uint8x16_t __s2_80 = __p2_80; \
-  uint8x8_t __ret_80; \
-  __ret_80 = vset_lane_u8(vgetq_lane_u8(__s2_80, __p3_80), __s0_80, __p1_80); \
-  __ret_80; \
+#define vcopy_laneq_u8(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
+  uint8x8_t __s0_317 = __p0_317; \
+  uint8x16_t __s2_317 = __p2_317; \
+  uint8x8_t __ret_317; \
+  __ret_317 = vset_lane_u8(vgetq_lane_u8(__s2_317, __p3_317), __s0_317, __p1_317); \
+  __ret_317; \
 })
 #else
-#define vcopy_laneq_u8(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  uint8x8_t __s0_81 = __p0_81; \
-  uint8x16_t __s2_81 = __p2_81; \
-  uint8x8_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_81; \
-  __ret_81 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_81, __p3_81), __rev0_81, __p1_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_81; \
+#define vcopy_laneq_u8(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
+  uint8x8_t __s0_318 = __p0_318; \
+  uint8x16_t __s2_318 = __p2_318; \
+  uint8x8_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_318; \
+  __ret_318 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_318, __p3_318), __rev0_318, __p1_318); \
+  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_318; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
-  uint32x2_t __s0_82 = __p0_82; \
-  uint32x4_t __s2_82 = __p2_82; \
-  uint32x2_t __ret_82; \
-  __ret_82 = vset_lane_u32(vgetq_lane_u32(__s2_82, __p3_82), __s0_82, __p1_82); \
-  __ret_82; \
+#define vcopy_laneq_u32(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
+  uint32x2_t __s0_319 = __p0_319; \
+  uint32x4_t __s2_319 = __p2_319; \
+  uint32x2_t __ret_319; \
+  __ret_319 = vset_lane_u32(vgetq_lane_u32(__s2_319, __p3_319), __s0_319, __p1_319); \
+  __ret_319; \
 })
 #else
-#define vcopy_laneq_u32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
-  uint32x2_t __s0_83 = __p0_83; \
-  uint32x4_t __s2_83 = __p2_83; \
-  uint32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
-  uint32x4_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \
-  uint32x2_t __ret_83; \
-  __ret_83 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_83, __p3_83), __rev0_83, __p1_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
-  __ret_83; \
+#define vcopy_laneq_u32(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
+  uint32x2_t __s0_320 = __p0_320; \
+  uint32x4_t __s2_320 = __p2_320; \
+  uint32x2_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 1, 0); \
+  uint32x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
+  uint32x2_t __ret_320; \
+  __ret_320 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_320, __p3_320), __rev0_320, __p1_320); \
+  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 1, 0); \
+  __ret_320; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
-  uint64x1_t __s0_84 = __p0_84; \
-  uint64x2_t __s2_84 = __p2_84; \
-  uint64x1_t __ret_84; \
-  __ret_84 = vset_lane_u64(vgetq_lane_u64(__s2_84, __p3_84), __s0_84, __p1_84); \
-  __ret_84; \
+#define vcopy_laneq_u64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
+  uint64x1_t __s0_321 = __p0_321; \
+  uint64x2_t __s2_321 = __p2_321; \
+  uint64x1_t __ret_321; \
+  __ret_321 = vset_lane_u64(vgetq_lane_u64(__s2_321, __p3_321), __s0_321, __p1_321); \
+  __ret_321; \
 })
 #else
-#define vcopy_laneq_u64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
-  uint64x1_t __s0_85 = __p0_85; \
-  uint64x2_t __s2_85 = __p2_85; \
-  uint64x2_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \
-  uint64x1_t __ret_85; \
-  __ret_85 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_85, __p3_85), __s0_85, __p1_85); \
-  __ret_85; \
+#define vcopy_laneq_u64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
+  uint64x1_t __s0_322 = __p0_322; \
+  uint64x2_t __s2_322 = __p2_322; \
+  uint64x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
+  uint64x1_t __ret_322; \
+  __ret_322 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_322, __p3_322), __s0_322, __p1_322); \
+  __ret_322; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
-  uint16x4_t __s0_86 = __p0_86; \
-  uint16x8_t __s2_86 = __p2_86; \
-  uint16x4_t __ret_86; \
-  __ret_86 = vset_lane_u16(vgetq_lane_u16(__s2_86, __p3_86), __s0_86, __p1_86); \
-  __ret_86; \
+#define vcopy_laneq_u16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
+  uint16x4_t __s0_323 = __p0_323; \
+  uint16x8_t __s2_323 = __p2_323; \
+  uint16x4_t __ret_323; \
+  __ret_323 = vset_lane_u16(vgetq_lane_u16(__s2_323, __p3_323), __s0_323, __p1_323); \
+  __ret_323; \
 })
 #else
-#define vcopy_laneq_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
-  uint16x4_t __s0_87 = __p0_87; \
-  uint16x8_t __s2_87 = __p2_87; \
-  uint16x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  uint16x8_t __rev2_87;  __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_87; \
-  __ret_87 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_87, __p3_87), __rev0_87, __p1_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
+#define vcopy_laneq_u16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
+  uint16x4_t __s0_324 = __p0_324; \
+  uint16x8_t __s2_324 = __p2_324; \
+  uint16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
+  uint16x8_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_324; \
+  __ret_324 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_324, __p3_324), __rev0_324, __p1_324); \
+  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
+  __ret_324; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
-  int8x8_t __s0_88 = __p0_88; \
-  int8x16_t __s2_88 = __p2_88; \
-  int8x8_t __ret_88; \
-  __ret_88 = vset_lane_s8(vgetq_lane_s8(__s2_88, __p3_88), __s0_88, __p1_88); \
-  __ret_88; \
+#define vcopy_laneq_s8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
+  int8x8_t __s0_325 = __p0_325; \
+  int8x16_t __s2_325 = __p2_325; \
+  int8x8_t __ret_325; \
+  __ret_325 = vset_lane_s8(vgetq_lane_s8(__s2_325, __p3_325), __s0_325, __p1_325); \
+  __ret_325; \
 })
 #else
-#define vcopy_laneq_s8(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
-  int8x8_t __s0_89 = __p0_89; \
-  int8x16_t __s2_89 = __p2_89; \
-  int8x8_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_89;  __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_89; \
-  __ret_89 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_89, __p3_89), __rev0_89, __p1_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_89; \
+#define vcopy_laneq_s8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
+  int8x8_t __s0_326 = __p0_326; \
+  int8x16_t __s2_326 = __p2_326; \
+  int8x8_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_326; \
+  __ret_326 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_326, __p3_326), __rev0_326, __p1_326); \
+  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_326; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
-  float32x2_t __s0_90 = __p0_90; \
-  float32x4_t __s2_90 = __p2_90; \
-  float32x2_t __ret_90; \
-  __ret_90 = vset_lane_f32(vgetq_lane_f32(__s2_90, __p3_90), __s0_90, __p1_90); \
-  __ret_90; \
+#define vcopy_laneq_f32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
+  float32x2_t __s0_327 = __p0_327; \
+  float32x4_t __s2_327 = __p2_327; \
+  float32x2_t __ret_327; \
+  __ret_327 = vset_lane_f32(vgetq_lane_f32(__s2_327, __p3_327), __s0_327, __p1_327); \
+  __ret_327; \
 })
 #else
-#define vcopy_laneq_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
-  float32x2_t __s0_91 = __p0_91; \
-  float32x4_t __s2_91 = __p2_91; \
-  float32x2_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \
-  float32x4_t __rev2_91;  __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 3, 2, 1, 0); \
-  float32x2_t __ret_91; \
-  __ret_91 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_91, __p3_91), __rev0_91, __p1_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \
-  __ret_91; \
+#define vcopy_laneq_f32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
+  float32x2_t __s0_328 = __p0_328; \
+  float32x4_t __s2_328 = __p2_328; \
+  float32x2_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 1, 0); \
+  float32x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
+  float32x2_t __ret_328; \
+  __ret_328 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_328, __p3_328), __rev0_328, __p1_328); \
+  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \
+  __ret_328; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
-  int32x2_t __s0_92 = __p0_92; \
-  int32x4_t __s2_92 = __p2_92; \
-  int32x2_t __ret_92; \
-  __ret_92 = vset_lane_s32(vgetq_lane_s32(__s2_92, __p3_92), __s0_92, __p1_92); \
-  __ret_92; \
+#define vcopy_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
+  int32x2_t __s0_329 = __p0_329; \
+  int32x4_t __s2_329 = __p2_329; \
+  int32x2_t __ret_329; \
+  __ret_329 = vset_lane_s32(vgetq_lane_s32(__s2_329, __p3_329), __s0_329, __p1_329); \
+  __ret_329; \
 })
 #else
-#define vcopy_laneq_s32(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
-  int32x2_t __s0_93 = __p0_93; \
-  int32x4_t __s2_93 = __p2_93; \
-  int32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  int32x4_t __rev2_93;  __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \
-  int32x2_t __ret_93; \
-  __ret_93 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_93, __p3_93), __rev0_93, __p1_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
+#define vcopy_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
+  int32x2_t __s0_330 = __p0_330; \
+  int32x4_t __s2_330 = __p2_330; \
+  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
+  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
+  int32x2_t __ret_330; \
+  __ret_330 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_330, __p3_330), __rev0_330, __p1_330); \
+  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
+  __ret_330; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
-  int64x1_t __s0_94 = __p0_94; \
-  int64x2_t __s2_94 = __p2_94; \
-  int64x1_t __ret_94; \
-  __ret_94 = vset_lane_s64(vgetq_lane_s64(__s2_94, __p3_94), __s0_94, __p1_94); \
-  __ret_94; \
+#define vcopy_laneq_s64(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
+  int64x1_t __s0_331 = __p0_331; \
+  int64x2_t __s2_331 = __p2_331; \
+  int64x1_t __ret_331; \
+  __ret_331 = vset_lane_s64(vgetq_lane_s64(__s2_331, __p3_331), __s0_331, __p1_331); \
+  __ret_331; \
 })
 #else
-#define vcopy_laneq_s64(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
-  int64x1_t __s0_95 = __p0_95; \
-  int64x2_t __s2_95 = __p2_95; \
-  int64x2_t __rev2_95;  __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \
-  int64x1_t __ret_95; \
-  __ret_95 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_95, __p3_95), __s0_95, __p1_95); \
-  __ret_95; \
+#define vcopy_laneq_s64(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
+  int64x1_t __s0_332 = __p0_332; \
+  int64x2_t __s2_332 = __p2_332; \
+  int64x2_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 1, 0); \
+  int64x1_t __ret_332; \
+  __ret_332 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_332, __p3_332), __s0_332, __p1_332); \
+  __ret_332; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
-  int16x4_t __s0_96 = __p0_96; \
-  int16x8_t __s2_96 = __p2_96; \
-  int16x4_t __ret_96; \
-  __ret_96 = vset_lane_s16(vgetq_lane_s16(__s2_96, __p3_96), __s0_96, __p1_96); \
-  __ret_96; \
+#define vcopy_laneq_s16(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
+  int16x4_t __s0_333 = __p0_333; \
+  int16x8_t __s2_333 = __p2_333; \
+  int16x4_t __ret_333; \
+  __ret_333 = vset_lane_s16(vgetq_lane_s16(__s2_333, __p3_333), __s0_333, __p1_333); \
+  __ret_333; \
 })
 #else
-#define vcopy_laneq_s16(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
-  int16x4_t __s0_97 = __p0_97; \
-  int16x8_t __s2_97 = __p2_97; \
-  int16x4_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 3, 2, 1, 0); \
-  int16x8_t __rev2_97;  __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_97; \
-  __ret_97 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_97, __p3_97), __rev0_97, __p1_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 3, 2, 1, 0); \
-  __ret_97; \
+#define vcopy_laneq_s16(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
+  int16x4_t __s0_334 = __p0_334; \
+  int16x8_t __s2_334 = __p2_334; \
+  int16x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
+  int16x8_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_334; \
+  __ret_334 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_334, __p3_334), __rev0_334, __p1_334); \
+  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
+  __ret_334; \
 })
 #endif
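
For readers skimming the vcopy_laneq hunk above: each of these macros inserts one lane of a 128-bit ("q") vector into a 64-bit vector, and the big-endian variants reverse lane order before and after the underlying vget/vset pair. A minimal usage sketch (not part of the diff), assuming a little-endian AArch64 target with this arm_neon.h on the include path:

/* Illustrative sketch only: shows what vcopy_laneq_s16 computes. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  int16x4_t dst = {10, 11, 12, 13};
  int16x8_t src = {0, 1, 2, 3, 4, 5, 6, 7};
  /* Copy lane 5 of src into lane 2 of dst; on little-endian this expands
     to vset_lane_s16(vgetq_lane_s16(src, 5), dst, 2). */
  int16x4_t out = vcopy_laneq_s16(dst, 2, src, 5);
  printf("%d %d %d %d\n", vget_lane_s16(out, 0), vget_lane_s16(out, 1),
         vget_lane_s16(out, 2), vget_lane_s16(out, 3)); /* 10 11 5 13 */
  return 0;
}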
 
@@ -45209,85 +49009,85 @@
 })
 #endif
 
-#define vdup_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_p64(__p0_335, __p1_335) __extension__ ({ \
+  poly64x1_t __s0_335 = __p0_335; \
+  poly64x1_t __ret_335; \
+  __ret_335 = splat_lane_p64(__s0_335, __p1_335); \
+  __ret_335; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_p64(__p0_336, __p1_336) __extension__ ({ \
+  poly64x1_t __s0_336 = __p0_336; \
+  poly64x2_t __ret_336; \
+  __ret_336 = splatq_lane_p64(__s0_336, __p1_336); \
+  __ret_336; \
 })
 #else
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_p64(__p0_337, __p1_337) __extension__ ({ \
+  poly64x1_t __s0_337 = __p0_337; \
+  poly64x2_t __ret_337; \
+  __ret_337 = __noswap_splatq_lane_p64(__s0_337, __p1_337); \
+  __ret_337 = __builtin_shufflevector(__ret_337, __ret_337, 1, 0); \
+  __ret_337; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f64(__p0_338, __p1_338) __extension__ ({ \
+  float64x1_t __s0_338 = __p0_338; \
+  float64x2_t __ret_338; \
+  __ret_338 = splatq_lane_f64(__s0_338, __p1_338); \
+  __ret_338; \
 })
 #else
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_f64(__p0_339, __p1_339) __extension__ ({ \
+  float64x1_t __s0_339 = __p0_339; \
+  float64x2_t __ret_339; \
+  __ret_339 = __noswap_splatq_lane_f64(__s0_339, __p1_339); \
+  __ret_339 = __builtin_shufflevector(__ret_339, __ret_339, 1, 0); \
+  __ret_339; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f16(__p0_340, __p1_340) __extension__ ({ \
+  float16x4_t __s0_340 = __p0_340; \
+  float16x8_t __ret_340; \
+  __ret_340 = splatq_lane_f16(__s0_340, __p1_340); \
+  __ret_340; \
 })
 #else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_f16(__p0_341, __p1_341) __extension__ ({ \
+  float16x4_t __s0_341 = __p0_341; \
+  float16x4_t __rev0_341;  __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, 3, 2, 1, 0); \
+  float16x8_t __ret_341; \
+  __ret_341 = __noswap_splatq_lane_f16(__rev0_341, __p1_341); \
+  __ret_341 = __builtin_shufflevector(__ret_341, __ret_341, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_341; \
 })
 #endif
 
-#define vdup_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_f64(__p0_342, __p1_342) __extension__ ({ \
+  float64x1_t __s0_342 = __p0_342; \
+  float64x1_t __ret_342; \
+  __ret_342 = splat_lane_f64(__s0_342, __p1_342); \
+  __ret_342; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_f16(__p0_343, __p1_343) __extension__ ({ \
+  float16x4_t __s0_343 = __p0_343; \
+  float16x4_t __ret_343; \
+  __ret_343 = splat_lane_f16(__s0_343, __p1_343); \
+  __ret_343; \
 })
 #else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_f16(__p0_344, __p1_344) __extension__ ({ \
+  float16x4_t __s0_344 = __p0_344; \
+  float16x4_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 3, 2, 1, 0); \
+  float16x4_t __ret_344; \
+  __ret_344 = __noswap_splat_lane_f16(__rev0_344, __p1_344); \
+  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 3, 2, 1, 0); \
+  __ret_344; \
 })
 #endif
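
The vdup(q)_lane rewrites in this hunk delegate the broadcast to generated splat(q)_lane_<type> helpers (presumably defined earlier in the full header, outside this excerpt) instead of open-coding __builtin_shufflevector, while keeping the big-endian reversal shuffles around the call. A short sketch of the observable behavior (not part of the diff), assuming an AArch64 target:

/* Illustrative sketch only: the broadcast performed by vdupq_lane_f64. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  float64x1_t s = {3.5};
  /* Broadcast lane 0 of s into both lanes; per this diff the macro now
     expands (little-endian) to splatq_lane_f64(s, 0) rather than an inline
     __builtin_shufflevector(s, s, 0, 0). */
  float64x2_t v = vdupq_lane_f64(s, 0);
  printf("%f %f\n", vgetq_lane_f64(v, 0), vgetq_lane_f64(v, 1)); /* 3.5 3.5 */
  return 0;
}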
 
@@ -45496,502 +49296,502 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_p8(__p0_345, __p1_345) __extension__ ({ \
+  poly8x16_t __s0_345 = __p0_345; \
+  poly8x8_t __ret_345; \
+  __ret_345 = splat_laneq_p8(__s0_345, __p1_345); \
+  __ret_345; \
 })
 #else
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_p8(__p0_346, __p1_346) __extension__ ({ \
+  poly8x16_t __s0_346 = __p0_346; \
+  poly8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_346; \
+  __ret_346 = __noswap_splat_laneq_p8(__rev0_346, __p1_346); \
+  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_346; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_p64(__p0_347, __p1_347) __extension__ ({ \
+  poly64x2_t __s0_347 = __p0_347; \
+  poly64x1_t __ret_347; \
+  __ret_347 = splat_laneq_p64(__s0_347, __p1_347); \
+  __ret_347; \
 })
 #else
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_p64(__p0_348, __p1_348) __extension__ ({ \
+  poly64x2_t __s0_348 = __p0_348; \
+  poly64x2_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 1, 0); \
+  poly64x1_t __ret_348; \
+  __ret_348 = __noswap_splat_laneq_p64(__rev0_348, __p1_348); \
+  __ret_348; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_p16(__p0_349, __p1_349) __extension__ ({ \
+  poly16x8_t __s0_349 = __p0_349; \
+  poly16x4_t __ret_349; \
+  __ret_349 = splat_laneq_p16(__s0_349, __p1_349); \
+  __ret_349; \
 })
 #else
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_p16(__p0_350, __p1_350) __extension__ ({ \
+  poly16x8_t __s0_350 = __p0_350; \
+  poly16x8_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_350; \
+  __ret_350 = __noswap_splat_laneq_p16(__rev0_350, __p1_350); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
+  __ret_350; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_p8(__p0_351, __p1_351) __extension__ ({ \
+  poly8x16_t __s0_351 = __p0_351; \
+  poly8x16_t __ret_351; \
+  __ret_351 = splatq_laneq_p8(__s0_351, __p1_351); \
+  __ret_351; \
 })
 #else
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_p8(__p0_352, __p1_352) __extension__ ({ \
+  poly8x16_t __s0_352 = __p0_352; \
+  poly8x16_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_352; \
+  __ret_352 = __noswap_splatq_laneq_p8(__rev0_352, __p1_352); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_p64(__p0_353, __p1_353) __extension__ ({ \
+  poly64x2_t __s0_353 = __p0_353; \
+  poly64x2_t __ret_353; \
+  __ret_353 = splatq_laneq_p64(__s0_353, __p1_353); \
+  __ret_353; \
 })
 #else
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_p64(__p0_354, __p1_354) __extension__ ({ \
+  poly64x2_t __s0_354 = __p0_354; \
+  poly64x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
+  poly64x2_t __ret_354; \
+  __ret_354 = __noswap_splatq_laneq_p64(__rev0_354, __p1_354); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
+  __ret_354; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_p16(__p0_355, __p1_355) __extension__ ({ \
+  poly16x8_t __s0_355 = __p0_355; \
+  poly16x8_t __ret_355; \
+  __ret_355 = splatq_laneq_p16(__s0_355, __p1_355); \
+  __ret_355; \
 })
 #else
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_p16(__p0_356, __p1_356) __extension__ ({ \
+  poly16x8_t __s0_356 = __p0_356; \
+  poly16x8_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_356; \
+  __ret_356 = __noswap_splatq_laneq_p16(__rev0_356, __p1_356); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_356; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u8(__p0_357, __p1_357) __extension__ ({ \
+  uint8x16_t __s0_357 = __p0_357; \
+  uint8x16_t __ret_357; \
+  __ret_357 = splatq_laneq_u8(__s0_357, __p1_357); \
+  __ret_357; \
 })
 #else
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u8(__p0_358, __p1_358) __extension__ ({ \
+  uint8x16_t __s0_358 = __p0_358; \
+  uint8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_358; \
+  __ret_358 = __noswap_splatq_laneq_u8(__rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_358; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u32(__p0_359, __p1_359) __extension__ ({ \
+  uint32x4_t __s0_359 = __p0_359; \
+  uint32x4_t __ret_359; \
+  __ret_359 = splatq_laneq_u32(__s0_359, __p1_359); \
+  __ret_359; \
 })
 #else
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u32(__p0_360, __p1_360) __extension__ ({ \
+  uint32x4_t __s0_360 = __p0_360; \
+  uint32x4_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 3, 2, 1, 0); \
+  uint32x4_t __ret_360; \
+  __ret_360 = __noswap_splatq_laneq_u32(__rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 3, 2, 1, 0); \
+  __ret_360; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u64(__p0_361, __p1_361) __extension__ ({ \
+  uint64x2_t __s0_361 = __p0_361; \
+  uint64x2_t __ret_361; \
+  __ret_361 = splatq_laneq_u64(__s0_361, __p1_361); \
+  __ret_361; \
 })
 #else
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u64(__p0_362, __p1_362) __extension__ ({ \
+  uint64x2_t __s0_362 = __p0_362; \
+  uint64x2_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 1, 0); \
+  uint64x2_t __ret_362; \
+  __ret_362 = __noswap_splatq_laneq_u64(__rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 1, 0); \
+  __ret_362; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u16(__p0_363, __p1_363) __extension__ ({ \
+  uint16x8_t __s0_363 = __p0_363; \
+  uint16x8_t __ret_363; \
+  __ret_363 = splatq_laneq_u16(__s0_363, __p1_363); \
+  __ret_363; \
 })
 #else
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u16(__p0_364, __p1_364) __extension__ ({ \
+  uint16x8_t __s0_364 = __p0_364; \
+  uint16x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_364; \
+  __ret_364 = __noswap_splatq_laneq_u16(__rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_364; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s8(__p0_365, __p1_365) __extension__ ({ \
+  int8x16_t __s0_365 = __p0_365; \
+  int8x16_t __ret_365; \
+  __ret_365 = splatq_laneq_s8(__s0_365, __p1_365); \
+  __ret_365; \
 })
 #else
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s8(__p0_366, __p1_366) __extension__ ({ \
+  int8x16_t __s0_366 = __p0_366; \
+  int8x16_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_366; \
+  __ret_366 = __noswap_splatq_laneq_s8(__rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_366; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_f64(__p0_367, __p1_367) __extension__ ({ \
+  float64x2_t __s0_367 = __p0_367; \
+  float64x2_t __ret_367; \
+  __ret_367 = splatq_laneq_f64(__s0_367, __p1_367); \
+  __ret_367; \
 })
 #else
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_f64(__p0_368, __p1_368) __extension__ ({ \
+  float64x2_t __s0_368 = __p0_368; \
+  float64x2_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 1, 0); \
+  float64x2_t __ret_368; \
+  __ret_368 = __noswap_splatq_laneq_f64(__rev0_368, __p1_368); \
+  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 1, 0); \
+  __ret_368; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_f32(__p0_369, __p1_369) __extension__ ({ \
+  float32x4_t __s0_369 = __p0_369; \
+  float32x4_t __ret_369; \
+  __ret_369 = splatq_laneq_f32(__s0_369, __p1_369); \
+  __ret_369; \
 })
 #else
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_f32(__p0_370, __p1_370) __extension__ ({ \
+  float32x4_t __s0_370 = __p0_370; \
+  float32x4_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 3, 2, 1, 0); \
+  float32x4_t __ret_370; \
+  __ret_370 = __noswap_splatq_laneq_f32(__rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 3, 2, 1, 0); \
+  __ret_370; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_f16(__p0_371, __p1_371) __extension__ ({ \
+  float16x8_t __s0_371 = __p0_371; \
+  float16x8_t __ret_371; \
+  __ret_371 = splatq_laneq_f16(__s0_371, __p1_371); \
+  __ret_371; \
 })
 #else
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_f16(__p0_372, __p1_372) __extension__ ({ \
+  float16x8_t __s0_372 = __p0_372; \
+  float16x8_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_372; \
+  __ret_372 = __noswap_splatq_laneq_f16(__rev0_372, __p1_372); \
+  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_372; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s32(__p0_373, __p1_373) __extension__ ({ \
+  int32x4_t __s0_373 = __p0_373; \
+  int32x4_t __ret_373; \
+  __ret_373 = splatq_laneq_s32(__s0_373, __p1_373); \
+  __ret_373; \
 })
 #else
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s32(__p0_374, __p1_374) __extension__ ({ \
+  int32x4_t __s0_374 = __p0_374; \
+  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
+  int32x4_t __ret_374; \
+  __ret_374 = __noswap_splatq_laneq_s32(__rev0_374, __p1_374); \
+  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
+  __ret_374; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s64(__p0_375, __p1_375) __extension__ ({ \
+  int64x2_t __s0_375 = __p0_375; \
+  int64x2_t __ret_375; \
+  __ret_375 = splatq_laneq_s64(__s0_375, __p1_375); \
+  __ret_375; \
 })
 #else
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s64(__p0_376, __p1_376) __extension__ ({ \
+  int64x2_t __s0_376 = __p0_376; \
+  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
+  int64x2_t __ret_376; \
+  __ret_376 = __noswap_splatq_laneq_s64(__rev0_376, __p1_376); \
+  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
+  __ret_376; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s16(__p0_377, __p1_377) __extension__ ({ \
+  int16x8_t __s0_377 = __p0_377; \
+  int16x8_t __ret_377; \
+  __ret_377 = splatq_laneq_s16(__s0_377, __p1_377); \
+  __ret_377; \
 })
 #else
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s16(__p0_378, __p1_378) __extension__ ({ \
+  int16x8_t __s0_378 = __p0_378; \
+  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_378; \
+  __ret_378 = __noswap_splatq_laneq_s16(__rev0_378, __p1_378); \
+  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_378; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_u8(__p0_379, __p1_379) __extension__ ({ \
+  uint8x16_t __s0_379 = __p0_379; \
+  uint8x8_t __ret_379; \
+  __ret_379 = splat_laneq_u8(__s0_379, __p1_379); \
+  __ret_379; \
 })
 #else
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_u8(__p0_380, __p1_380) __extension__ ({ \
+  uint8x16_t __s0_380 = __p0_380; \
+  uint8x16_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_380; \
+  __ret_380 = __noswap_splat_laneq_u8(__rev0_380, __p1_380); \
+  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_u32(__p0_381, __p1_381) __extension__ ({ \
+  uint32x4_t __s0_381 = __p0_381; \
+  uint32x2_t __ret_381; \
+  __ret_381 = splat_laneq_u32(__s0_381, __p1_381); \
+  __ret_381; \
 })
 #else
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_laneq_u32(__p0_382, __p1_382) __extension__ ({ \
+  uint32x4_t __s0_382 = __p0_382; \
+  uint32x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  uint32x2_t __ret_382; \
+  __ret_382 = __noswap_splat_laneq_u32(__rev0_382, __p1_382); \
+  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 1, 0); \
+  __ret_382; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_u64(__p0_383, __p1_383) __extension__ ({ \
+  uint64x2_t __s0_383 = __p0_383; \
+  uint64x1_t __ret_383; \
+  __ret_383 = splat_laneq_u64(__s0_383, __p1_383); \
+  __ret_383; \
 })
 #else
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_u64(__p0_384, __p1_384) __extension__ ({ \
+  uint64x2_t __s0_384 = __p0_384; \
+  uint64x2_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 1, 0); \
+  uint64x1_t __ret_384; \
+  __ret_384 = __noswap_splat_laneq_u64(__rev0_384, __p1_384); \
+  __ret_384; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_u16(__p0_385, __p1_385) __extension__ ({ \
+  uint16x8_t __s0_385 = __p0_385; \
+  uint16x4_t __ret_385; \
+  __ret_385 = splat_laneq_u16(__s0_385, __p1_385); \
+  __ret_385; \
 })
 #else
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_u16(__p0_386, __p1_386) __extension__ ({ \
+  uint16x8_t __s0_386 = __p0_386; \
+  uint16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_386; \
+  __ret_386 = __noswap_splat_laneq_u16(__rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 3, 2, 1, 0); \
+  __ret_386; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_s8(__p0_387, __p1_387) __extension__ ({ \
+  int8x16_t __s0_387 = __p0_387; \
+  int8x8_t __ret_387; \
+  __ret_387 = splat_laneq_s8(__s0_387, __p1_387); \
+  __ret_387; \
 })
 #else
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_s8(__p0_388, __p1_388) __extension__ ({ \
+  int8x16_t __s0_388 = __p0_388; \
+  int8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_388; \
+  __ret_388 = __noswap_splat_laneq_s8(__rev0_388, __p1_388); \
+  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_388; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_f64(__p0_389, __p1_389) __extension__ ({ \
+  float64x2_t __s0_389 = __p0_389; \
+  float64x1_t __ret_389; \
+  __ret_389 = splat_laneq_f64(__s0_389, __p1_389); \
+  __ret_389; \
 })
 #else
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_f64(__p0_390, __p1_390) __extension__ ({ \
+  float64x2_t __s0_390 = __p0_390; \
+  float64x2_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 1, 0); \
+  float64x1_t __ret_390; \
+  __ret_390 = __noswap_splat_laneq_f64(__rev0_390, __p1_390); \
+  __ret_390; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_f32(__p0_391, __p1_391) __extension__ ({ \
+  float32x4_t __s0_391 = __p0_391; \
+  float32x2_t __ret_391; \
+  __ret_391 = splat_laneq_f32(__s0_391, __p1_391); \
+  __ret_391; \
 })
 #else
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_laneq_f32(__p0_392, __p1_392) __extension__ ({ \
+  float32x4_t __s0_392 = __p0_392; \
+  float32x4_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 3, 2, 1, 0); \
+  float32x2_t __ret_392; \
+  __ret_392 = __noswap_splat_laneq_f32(__rev0_392, __p1_392); \
+  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
+  __ret_392; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_f16(__p0_393, __p1_393) __extension__ ({ \
+  float16x8_t __s0_393 = __p0_393; \
+  float16x4_t __ret_393; \
+  __ret_393 = splat_laneq_f16(__s0_393, __p1_393); \
+  __ret_393; \
 })
 #else
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_f16(__p0_394, __p1_394) __extension__ ({ \
+  float16x8_t __s0_394 = __p0_394; \
+  float16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_394; \
+  __ret_394 = __noswap_splat_laneq_f16(__rev0_394, __p1_394); \
+  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 3, 2, 1, 0); \
+  __ret_394; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_s32(__p0_395, __p1_395) __extension__ ({ \
+  int32x4_t __s0_395 = __p0_395; \
+  int32x2_t __ret_395; \
+  __ret_395 = splat_laneq_s32(__s0_395, __p1_395); \
+  __ret_395; \
 })
 #else
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_laneq_s32(__p0_396, __p1_396) __extension__ ({ \
+  int32x4_t __s0_396 = __p0_396; \
+  int32x4_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 2, 1, 0); \
+  int32x2_t __ret_396; \
+  __ret_396 = __noswap_splat_laneq_s32(__rev0_396, __p1_396); \
+  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \
+  __ret_396; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_s64(__p0_397, __p1_397) __extension__ ({ \
+  int64x2_t __s0_397 = __p0_397; \
+  int64x1_t __ret_397; \
+  __ret_397 = splat_laneq_s64(__s0_397, __p1_397); \
+  __ret_397; \
 })
 #else
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_s64(__p0_398, __p1_398) __extension__ ({ \
+  int64x2_t __s0_398 = __p0_398; \
+  int64x2_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 1, 0); \
+  int64x1_t __ret_398; \
+  __ret_398 = __noswap_splat_laneq_s64(__rev0_398, __p1_398); \
+  __ret_398; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_s16(__p0_399, __p1_399) __extension__ ({ \
+  int16x8_t __s0_399 = __p0_399; \
+  int16x4_t __ret_399; \
+  __ret_399 = splat_laneq_s16(__s0_399, __p1_399); \
+  __ret_399; \
 })
 #else
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_s16(__p0_400, __p1_400) __extension__ ({ \
+  int16x8_t __s0_400 = __p0_400; \
+  int16x8_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_400; \
+  __ret_400 = __noswap_splat_laneq_s16(__rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
+  __ret_400; \
 })
 #endif
 
@@ -46487,246 +50287,246 @@
   __ret = vfma_f64(__p0, -__p1, __p2);
   return __ret;
 }
-#define vfmsd_lane_f64(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
-  float64_t __s0_98 = __p0_98; \
-  float64_t __s1_98 = __p1_98; \
-  float64x1_t __s2_98 = __p2_98; \
-  float64_t __ret_98; \
-  __ret_98 = vfmad_lane_f64(__s0_98, -__s1_98, __s2_98, __p3_98); \
-  __ret_98; \
+#define vfmsd_lane_f64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  float64_t __s0_401 = __p0_401; \
+  float64_t __s1_401 = __p1_401; \
+  float64x1_t __s2_401 = __p2_401; \
+  float64_t __ret_401; \
+  __ret_401 = vfmad_lane_f64(__s0_401, -__s1_401, __s2_401, __p3_401); \
+  __ret_401; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
-  float32_t __s0_99 = __p0_99; \
-  float32_t __s1_99 = __p1_99; \
-  float32x2_t __s2_99 = __p2_99; \
-  float32_t __ret_99; \
-  __ret_99 = vfmas_lane_f32(__s0_99, -__s1_99, __s2_99, __p3_99); \
-  __ret_99; \
+#define vfmss_lane_f32(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  float32_t __s0_402 = __p0_402; \
+  float32_t __s1_402 = __p1_402; \
+  float32x2_t __s2_402 = __p2_402; \
+  float32_t __ret_402; \
+  __ret_402 = vfmas_lane_f32(__s0_402, -__s1_402, __s2_402, __p3_402); \
+  __ret_402; \
 })
 #else
-#define vfmss_lane_f32(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
-  float32_t __s0_100 = __p0_100; \
-  float32_t __s1_100 = __p1_100; \
-  float32x2_t __s2_100 = __p2_100; \
-  float32x2_t __rev2_100;  __rev2_100 = __builtin_shufflevector(__s2_100, __s2_100, 1, 0); \
-  float32_t __ret_100; \
-  __ret_100 = __noswap_vfmas_lane_f32(__s0_100, -__s1_100, __rev2_100, __p3_100); \
-  __ret_100; \
+#define vfmss_lane_f32(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  float32_t __s0_403 = __p0_403; \
+  float32_t __s1_403 = __p1_403; \
+  float32x2_t __s2_403 = __p2_403; \
+  float32x2_t __rev2_403;  __rev2_403 = __builtin_shufflevector(__s2_403, __s2_403, 1, 0); \
+  float32_t __ret_403; \
+  __ret_403 = __noswap_vfmas_lane_f32(__s0_403, -__s1_403, __rev2_403, __p3_403); \
+  __ret_403; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
-  float64x2_t __s0_101 = __p0_101; \
-  float64x2_t __s1_101 = __p1_101; \
-  float64x1_t __s2_101 = __p2_101; \
-  float64x2_t __ret_101; \
-  __ret_101 = vfmaq_lane_f64(__s0_101, -__s1_101, __s2_101, __p3_101); \
-  __ret_101; \
+#define vfmsq_lane_f64(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  float64x2_t __s0_404 = __p0_404; \
+  float64x2_t __s1_404 = __p1_404; \
+  float64x1_t __s2_404 = __p2_404; \
+  float64x2_t __ret_404; \
+  __ret_404 = vfmaq_lane_f64(__s0_404, -__s1_404, __s2_404, __p3_404); \
+  __ret_404; \
 })
 #else
-#define vfmsq_lane_f64(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
-  float64x2_t __s0_102 = __p0_102; \
-  float64x2_t __s1_102 = __p1_102; \
-  float64x1_t __s2_102 = __p2_102; \
-  float64x2_t __rev0_102;  __rev0_102 = __builtin_shufflevector(__s0_102, __s0_102, 1, 0); \
-  float64x2_t __rev1_102;  __rev1_102 = __builtin_shufflevector(__s1_102, __s1_102, 1, 0); \
-  float64x2_t __ret_102; \
-  __ret_102 = __noswap_vfmaq_lane_f64(__rev0_102, -__rev1_102, __s2_102, __p3_102); \
-  __ret_102 = __builtin_shufflevector(__ret_102, __ret_102, 1, 0); \
-  __ret_102; \
+#define vfmsq_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  float64x2_t __s0_405 = __p0_405; \
+  float64x2_t __s1_405 = __p1_405; \
+  float64x1_t __s2_405 = __p2_405; \
+  float64x2_t __rev0_405;  __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, 1, 0); \
+  float64x2_t __rev1_405;  __rev1_405 = __builtin_shufflevector(__s1_405, __s1_405, 1, 0); \
+  float64x2_t __ret_405; \
+  __ret_405 = __noswap_vfmaq_lane_f64(__rev0_405, -__rev1_405, __s2_405, __p3_405); \
+  __ret_405 = __builtin_shufflevector(__ret_405, __ret_405, 1, 0); \
+  __ret_405; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
-  float32x4_t __s0_103 = __p0_103; \
-  float32x4_t __s1_103 = __p1_103; \
-  float32x2_t __s2_103 = __p2_103; \
-  float32x4_t __ret_103; \
-  __ret_103 = vfmaq_lane_f32(__s0_103, -__s1_103, __s2_103, __p3_103); \
-  __ret_103; \
+#define vfmsq_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  float32x4_t __s0_406 = __p0_406; \
+  float32x4_t __s1_406 = __p1_406; \
+  float32x2_t __s2_406 = __p2_406; \
+  float32x4_t __ret_406; \
+  __ret_406 = vfmaq_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \
+  __ret_406; \
 })
 #else
-#define vfmsq_lane_f32(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
-  float32x4_t __s0_104 = __p0_104; \
-  float32x4_t __s1_104 = __p1_104; \
-  float32x2_t __s2_104 = __p2_104; \
-  float32x4_t __rev0_104;  __rev0_104 = __builtin_shufflevector(__s0_104, __s0_104, 3, 2, 1, 0); \
-  float32x4_t __rev1_104;  __rev1_104 = __builtin_shufflevector(__s1_104, __s1_104, 3, 2, 1, 0); \
-  float32x2_t __rev2_104;  __rev2_104 = __builtin_shufflevector(__s2_104, __s2_104, 1, 0); \
-  float32x4_t __ret_104; \
-  __ret_104 = __noswap_vfmaq_lane_f32(__rev0_104, -__rev1_104, __rev2_104, __p3_104); \
-  __ret_104 = __builtin_shufflevector(__ret_104, __ret_104, 3, 2, 1, 0); \
-  __ret_104; \
+#define vfmsq_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  float32x4_t __s0_407 = __p0_407; \
+  float32x4_t __s1_407 = __p1_407; \
+  float32x2_t __s2_407 = __p2_407; \
+  float32x4_t __rev0_407;  __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, 3, 2, 1, 0); \
+  float32x4_t __rev1_407;  __rev1_407 = __builtin_shufflevector(__s1_407, __s1_407, 3, 2, 1, 0); \
+  float32x2_t __rev2_407;  __rev2_407 = __builtin_shufflevector(__s2_407, __s2_407, 1, 0); \
+  float32x4_t __ret_407; \
+  __ret_407 = __noswap_vfmaq_lane_f32(__rev0_407, -__rev1_407, __rev2_407, __p3_407); \
+  __ret_407 = __builtin_shufflevector(__ret_407, __ret_407, 3, 2, 1, 0); \
+  __ret_407; \
 })
 #endif
 
-#define vfms_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
-  float64x1_t __s0_105 = __p0_105; \
-  float64x1_t __s1_105 = __p1_105; \
-  float64x1_t __s2_105 = __p2_105; \
-  float64x1_t __ret_105; \
-  __ret_105 = vfma_lane_f64(__s0_105, -__s1_105, __s2_105, __p3_105); \
-  __ret_105; \
+#define vfms_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  float64x1_t __s0_408 = __p0_408; \
+  float64x1_t __s1_408 = __p1_408; \
+  float64x1_t __s2_408 = __p2_408; \
+  float64x1_t __ret_408; \
+  __ret_408 = vfma_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \
+  __ret_408; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
-  float32x2_t __s0_106 = __p0_106; \
-  float32x2_t __s1_106 = __p1_106; \
-  float32x2_t __s2_106 = __p2_106; \
-  float32x2_t __ret_106; \
-  __ret_106 = vfma_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
-  __ret_106; \
+#define vfms_lane_f32(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  float32x2_t __s0_409 = __p0_409; \
+  float32x2_t __s1_409 = __p1_409; \
+  float32x2_t __s2_409 = __p2_409; \
+  float32x2_t __ret_409; \
+  __ret_409 = vfma_lane_f32(__s0_409, -__s1_409, __s2_409, __p3_409); \
+  __ret_409; \
 })
 #else
-#define vfms_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
-  float32x2_t __s0_107 = __p0_107; \
-  float32x2_t __s1_107 = __p1_107; \
-  float32x2_t __s2_107 = __p2_107; \
-  float32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  float32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  float32x2_t __rev2_107;  __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
-  float32x2_t __ret_107; \
-  __ret_107 = __noswap_vfma_lane_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
+#define vfms_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  float32x2_t __s0_410 = __p0_410; \
+  float32x2_t __s1_410 = __p1_410; \
+  float32x2_t __s2_410 = __p2_410; \
+  float32x2_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 1, 0); \
+  float32x2_t __rev1_410;  __rev1_410 = __builtin_shufflevector(__s1_410, __s1_410, 1, 0); \
+  float32x2_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 1, 0); \
+  float32x2_t __ret_410; \
+  __ret_410 = __noswap_vfma_lane_f32(__rev0_410, -__rev1_410, __rev2_410, __p3_410); \
+  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 1, 0); \
+  __ret_410; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
-  float64_t __s0_108 = __p0_108; \
-  float64_t __s1_108 = __p1_108; \
-  float64x2_t __s2_108 = __p2_108; \
-  float64_t __ret_108; \
-  __ret_108 = vfmad_laneq_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
-  __ret_108; \
+#define vfmsd_laneq_f64(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  float64_t __s0_411 = __p0_411; \
+  float64_t __s1_411 = __p1_411; \
+  float64x2_t __s2_411 = __p2_411; \
+  float64_t __ret_411; \
+  __ret_411 = vfmad_laneq_f64(__s0_411, -__s1_411, __s2_411, __p3_411); \
+  __ret_411; \
 })
 #else
-#define vfmsd_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
-  float64_t __s0_109 = __p0_109; \
-  float64_t __s1_109 = __p1_109; \
-  float64x2_t __s2_109 = __p2_109; \
-  float64x2_t __rev2_109;  __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \
-  float64_t __ret_109; \
-  __ret_109 = __noswap_vfmad_laneq_f64(__s0_109, -__s1_109, __rev2_109, __p3_109); \
-  __ret_109; \
+#define vfmsd_laneq_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  float64_t __s0_412 = __p0_412; \
+  float64_t __s1_412 = __p1_412; \
+  float64x2_t __s2_412 = __p2_412; \
+  float64x2_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 1, 0); \
+  float64_t __ret_412; \
+  __ret_412 = __noswap_vfmad_laneq_f64(__s0_412, -__s1_412, __rev2_412, __p3_412); \
+  __ret_412; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  float32_t __s0_110 = __p0_110; \
-  float32_t __s1_110 = __p1_110; \
-  float32x4_t __s2_110 = __p2_110; \
-  float32_t __ret_110; \
-  __ret_110 = vfmas_laneq_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
-  __ret_110; \
+#define vfmss_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  float32_t __s0_413 = __p0_413; \
+  float32_t __s1_413 = __p1_413; \
+  float32x4_t __s2_413 = __p2_413; \
+  float32_t __ret_413; \
+  __ret_413 = vfmas_laneq_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \
+  __ret_413; \
 })
 #else
-#define vfmss_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  float32_t __s0_111 = __p0_111; \
-  float32_t __s1_111 = __p1_111; \
-  float32x4_t __s2_111 = __p2_111; \
-  float32x4_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \
-  float32_t __ret_111; \
-  __ret_111 = __noswap_vfmas_laneq_f32(__s0_111, -__s1_111, __rev2_111, __p3_111); \
-  __ret_111; \
+#define vfmss_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  float32_t __s0_414 = __p0_414; \
+  float32_t __s1_414 = __p1_414; \
+  float32x4_t __s2_414 = __p2_414; \
+  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
+  float32_t __ret_414; \
+  __ret_414 = __noswap_vfmas_laneq_f32(__s0_414, -__s1_414, __rev2_414, __p3_414); \
+  __ret_414; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  float64x2_t __s0_112 = __p0_112; \
-  float64x2_t __s1_112 = __p1_112; \
-  float64x2_t __s2_112 = __p2_112; \
-  float64x2_t __ret_112; \
-  __ret_112 = vfmaq_laneq_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
-  __ret_112; \
+#define vfmsq_laneq_f64(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  float64x2_t __s0_415 = __p0_415; \
+  float64x2_t __s1_415 = __p1_415; \
+  float64x2_t __s2_415 = __p2_415; \
+  float64x2_t __ret_415; \
+  __ret_415 = vfmaq_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \
+  __ret_415; \
 })
 #else
-#define vfmsq_laneq_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  float64x2_t __s0_113 = __p0_113; \
-  float64x2_t __s1_113 = __p1_113; \
-  float64x2_t __s2_113 = __p2_113; \
-  float64x2_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 1, 0); \
-  float64x2_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 1, 0); \
-  float64x2_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 1, 0); \
-  float64x2_t __ret_113; \
-  __ret_113 = __noswap_vfmaq_laneq_f64(__rev0_113, -__rev1_113, __rev2_113, __p3_113); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 1, 0); \
-  __ret_113; \
+#define vfmsq_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  float64x2_t __s0_416 = __p0_416; \
+  float64x2_t __s1_416 = __p1_416; \
+  float64x2_t __s2_416 = __p2_416; \
+  float64x2_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 1, 0); \
+  float64x2_t __rev1_416;  __rev1_416 = __builtin_shufflevector(__s1_416, __s1_416, 1, 0); \
+  float64x2_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \
+  float64x2_t __ret_416; \
+  __ret_416 = __noswap_vfmaq_laneq_f64(__rev0_416, -__rev1_416, __rev2_416, __p3_416); \
+  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 1, 0); \
+  __ret_416; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  float32x4_t __s0_114 = __p0_114; \
-  float32x4_t __s1_114 = __p1_114; \
-  float32x4_t __s2_114 = __p2_114; \
-  float32x4_t __ret_114; \
-  __ret_114 = vfmaq_laneq_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
-  __ret_114; \
+#define vfmsq_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  float32x4_t __s0_417 = __p0_417; \
+  float32x4_t __s1_417 = __p1_417; \
+  float32x4_t __s2_417 = __p2_417; \
+  float32x4_t __ret_417; \
+  __ret_417 = vfmaq_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \
+  __ret_417; \
 })
 #else
-#define vfmsq_laneq_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  float32x4_t __s0_115 = __p0_115; \
-  float32x4_t __s1_115 = __p1_115; \
-  float32x4_t __s2_115 = __p2_115; \
-  float32x4_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 3, 2, 1, 0); \
-  float32x4_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 3, 2, 1, 0); \
-  float32x4_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 3, 2, 1, 0); \
-  float32x4_t __ret_115; \
-  __ret_115 = __noswap_vfmaq_laneq_f32(__rev0_115, -__rev1_115, __rev2_115, __p3_115); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 3, 2, 1, 0); \
-  __ret_115; \
+#define vfmsq_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  float32x4_t __s0_418 = __p0_418; \
+  float32x4_t __s1_418 = __p1_418; \
+  float32x4_t __s2_418 = __p2_418; \
+  float32x4_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 3, 2, 1, 0); \
+  float32x4_t __rev1_418;  __rev1_418 = __builtin_shufflevector(__s1_418, __s1_418, 3, 2, 1, 0); \
+  float32x4_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \
+  float32x4_t __ret_418; \
+  __ret_418 = __noswap_vfmaq_laneq_f32(__rev0_418, -__rev1_418, __rev2_418, __p3_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 3, 2, 1, 0); \
+  __ret_418; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  float64x1_t __s0_116 = __p0_116; \
-  float64x1_t __s1_116 = __p1_116; \
-  float64x2_t __s2_116 = __p2_116; \
-  float64x1_t __ret_116; \
-  __ret_116 = vfma_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
-  __ret_116; \
+#define vfms_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  float64x1_t __s0_419 = __p0_419; \
+  float64x1_t __s1_419 = __p1_419; \
+  float64x2_t __s2_419 = __p2_419; \
+  float64x1_t __ret_419; \
+  __ret_419 = vfma_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \
+  __ret_419; \
 })
 #else
-#define vfms_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  float64x1_t __s0_117 = __p0_117; \
-  float64x1_t __s1_117 = __p1_117; \
-  float64x2_t __s2_117 = __p2_117; \
-  float64x2_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
-  float64x1_t __ret_117; \
-  __ret_117 = __noswap_vfma_laneq_f64(__s0_117, -__s1_117, __rev2_117, __p3_117); \
-  __ret_117; \
+#define vfms_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  float64x1_t __s0_420 = __p0_420; \
+  float64x1_t __s1_420 = __p1_420; \
+  float64x2_t __s2_420 = __p2_420; \
+  float64x2_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \
+  float64x1_t __ret_420; \
+  __ret_420 = __noswap_vfma_laneq_f64(__s0_420, -__s1_420, __rev2_420, __p3_420); \
+  __ret_420; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
-  float32x2_t __s0_118 = __p0_118; \
-  float32x2_t __s1_118 = __p1_118; \
-  float32x4_t __s2_118 = __p2_118; \
-  float32x2_t __ret_118; \
-  __ret_118 = vfma_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
-  __ret_118; \
+#define vfms_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  float32x2_t __s0_421 = __p0_421; \
+  float32x2_t __s1_421 = __p1_421; \
+  float32x4_t __s2_421 = __p2_421; \
+  float32x2_t __ret_421; \
+  __ret_421 = vfma_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \
+  __ret_421; \
 })
 #else
-#define vfms_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
-  float32x2_t __s0_119 = __p0_119; \
-  float32x2_t __s1_119 = __p1_119; \
-  float32x4_t __s2_119 = __p2_119; \
-  float32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  float32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  float32x4_t __rev2_119;  __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
-  float32x2_t __ret_119; \
-  __ret_119 = __noswap_vfma_laneq_f32(__rev0_119, -__rev1_119, __rev2_119, __p3_119); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
+#define vfms_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  float32x2_t __s0_422 = __p0_422; \
+  float32x2_t __s1_422 = __p1_422; \
+  float32x4_t __s2_422 = __p2_422; \
+  float32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
+  float32x2_t __rev1_422;  __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 1, 0); \
+  float32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
+  float32x2_t __ret_422; \
+  __ret_422 = __noswap_vfma_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
+  __ret_422; \
 })
 #endif
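
A note on the vfms hunks above: there is no negated-multiply primitive here, so both before and after this change vfmsq_laneq_f32 is expressed as vfmaq_laneq_f32 with the second operand negated, and the big-endian branch shuffles each operand into the compiler's internal lane order, calls the __noswap_ variant, and shuffles the result back. A minimal scalar sketch of the arithmetic the macro expands to (plain C arrays standing in for float32x4_t; the _model function is a hypothetical illustration, not part of the header; fmaf models the single-rounding fused multiply-add):

#include <math.h>
#include <stdio.h>

/* Scalar model of vfmsq_laneq_f32(a, b, v, lane): each output lane i
 * computes fma(-b[i], v[lane], a[i]), i.e. a[i] - b[i] * v[lane]
 * with a single rounding step. */
static void vfmsq_laneq_f32_model(float ret[4], const float a[4],
                                  const float b[4], const float v[4],
                                  int lane) {
  for (int i = 0; i < 4; ++i)
    ret[i] = fmaf(-b[i], v[lane], a[i]);
}

int main(void) {
  const float a[4] = {1, 2, 3, 4};
  const float b[4] = {5, 6, 7, 8};
  const float v[4] = {0.5f, 1.5f, 2.5f, 3.5f};
  float r[4];
  vfmsq_laneq_f32_model(r, a, b, v, 2);  /* lane 2 -> v[2] == 2.5f */
  for (int i = 0; i < 4; ++i)
    printf("%g ", r[i]);                 /* prints: -11.5 -13 -14.5 -16 */
  printf("\n");
  return 0;
}
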
 
@@ -48748,242 +52548,242 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_u32(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  uint32x4_t __s0_423 = __p0_423; \
+  uint32x4_t __s1_423 = __p1_423; \
+  uint32x4_t __s2_423 = __p2_423; \
+  uint32x4_t __ret_423; \
+  __ret_423 = __s0_423 + __s1_423 * splatq_laneq_u32(__s2_423, __p3_423); \
+  __ret_423; \
 })
 #else
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_u32(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  uint32x4_t __s0_424 = __p0_424; \
+  uint32x4_t __s1_424 = __p1_424; \
+  uint32x4_t __s2_424 = __p2_424; \
+  uint32x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
+  uint32x4_t __rev1_424;  __rev1_424 = __builtin_shufflevector(__s1_424, __s1_424, 3, 2, 1, 0); \
+  uint32x4_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 3, 2, 1, 0); \
+  uint32x4_t __ret_424; \
+  __ret_424 = __rev0_424 + __rev1_424 * __noswap_splatq_laneq_u32(__rev2_424, __p3_424); \
+  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
+  __ret_424; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_u16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  uint16x8_t __s0_425 = __p0_425; \
+  uint16x8_t __s1_425 = __p1_425; \
+  uint16x8_t __s2_425 = __p2_425; \
+  uint16x8_t __ret_425; \
+  __ret_425 = __s0_425 + __s1_425 * splatq_laneq_u16(__s2_425, __p3_425); \
+  __ret_425; \
 })
 #else
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_u16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  uint16x8_t __s0_426 = __p0_426; \
+  uint16x8_t __s1_426 = __p1_426; \
+  uint16x8_t __s2_426 = __p2_426; \
+  uint16x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_426;  __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_426; \
+  __ret_426 = __rev0_426 + __rev1_426 * __noswap_splatq_laneq_u16(__rev2_426, __p3_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_426; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_f32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
+  float32x4_t __s0_427 = __p0_427; \
+  float32x4_t __s1_427 = __p1_427; \
+  float32x4_t __s2_427 = __p2_427; \
+  float32x4_t __ret_427; \
+  __ret_427 = __s0_427 + __s1_427 * splatq_laneq_f32(__s2_427, __p3_427); \
+  __ret_427; \
 })
 #else
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_f32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
+  float32x4_t __s0_428 = __p0_428; \
+  float32x4_t __s1_428 = __p1_428; \
+  float32x4_t __s2_428 = __p2_428; \
+  float32x4_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \
+  float32x4_t __rev1_428;  __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \
+  float32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
+  float32x4_t __ret_428; \
+  __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_f32(__rev2_428, __p3_428); \
+  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \
+  __ret_428; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_s32(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
+  int32x4_t __s0_429 = __p0_429; \
+  int32x4_t __s1_429 = __p1_429; \
+  int32x4_t __s2_429 = __p2_429; \
+  int32x4_t __ret_429; \
+  __ret_429 = __s0_429 + __s1_429 * splatq_laneq_s32(__s2_429, __p3_429); \
+  __ret_429; \
 })
 #else
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_s32(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
+  int32x4_t __s0_430 = __p0_430; \
+  int32x4_t __s1_430 = __p1_430; \
+  int32x4_t __s2_430 = __p2_430; \
+  int32x4_t __rev0_430;  __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 3, 2, 1, 0); \
+  int32x4_t __rev1_430;  __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 3, 2, 1, 0); \
+  int32x4_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 3, 2, 1, 0); \
+  int32x4_t __ret_430; \
+  __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_s32(__rev2_430, __p3_430); \
+  __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 3, 2, 1, 0); \
+  __ret_430; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_s16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
+  int16x8_t __s0_431 = __p0_431; \
+  int16x8_t __s1_431 = __p1_431; \
+  int16x8_t __s2_431 = __p2_431; \
+  int16x8_t __ret_431; \
+  __ret_431 = __s0_431 + __s1_431 * splatq_laneq_s16(__s2_431, __p3_431); \
+  __ret_431; \
 })
 #else
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_s16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
+  int16x8_t __s0_432 = __p0_432; \
+  int16x8_t __s1_432 = __p1_432; \
+  int16x8_t __s2_432 = __p2_432; \
+  int16x8_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_432;  __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_432; \
+  __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_s16(__rev2_432, __p3_432); \
+  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_432; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_u32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
+  uint32x2_t __s0_433 = __p0_433; \
+  uint32x2_t __s1_433 = __p1_433; \
+  uint32x4_t __s2_433 = __p2_433; \
+  uint32x2_t __ret_433; \
+  __ret_433 = __s0_433 + __s1_433 * splat_laneq_u32(__s2_433, __p3_433); \
+  __ret_433; \
 })
 #else
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_laneq_u32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
+  uint32x2_t __s0_434 = __p0_434; \
+  uint32x2_t __s1_434 = __p1_434; \
+  uint32x4_t __s2_434 = __p2_434; \
+  uint32x2_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 1, 0); \
+  uint32x2_t __rev1_434;  __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 1, 0); \
+  uint32x4_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \
+  uint32x2_t __ret_434; \
+  __ret_434 = __rev0_434 + __rev1_434 * __noswap_splat_laneq_u32(__rev2_434, __p3_434); \
+  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 1, 0); \
+  __ret_434; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_u16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
+  uint16x4_t __s0_435 = __p0_435; \
+  uint16x4_t __s1_435 = __p1_435; \
+  uint16x8_t __s2_435 = __p2_435; \
+  uint16x4_t __ret_435; \
+  __ret_435 = __s0_435 + __s1_435 * splat_laneq_u16(__s2_435, __p3_435); \
+  __ret_435; \
 })
 #else
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_laneq_u16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
+  uint16x4_t __s0_436 = __p0_436; \
+  uint16x4_t __s1_436 = __p1_436; \
+  uint16x8_t __s2_436 = __p2_436; \
+  uint16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
+  uint16x4_t __rev1_436;  __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 3, 2, 1, 0); \
+  uint16x8_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_436; \
+  __ret_436 = __rev0_436 + __rev1_436 * __noswap_splat_laneq_u16(__rev2_436, __p3_436); \
+  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
+  __ret_436; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_f32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
+  float32x2_t __s0_437 = __p0_437; \
+  float32x2_t __s1_437 = __p1_437; \
+  float32x4_t __s2_437 = __p2_437; \
+  float32x2_t __ret_437; \
+  __ret_437 = __s0_437 + __s1_437 * splat_laneq_f32(__s2_437, __p3_437); \
+  __ret_437; \
 })
 #else
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_laneq_f32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
+  float32x2_t __s0_438 = __p0_438; \
+  float32x2_t __s1_438 = __p1_438; \
+  float32x4_t __s2_438 = __p2_438; \
+  float32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
+  float32x2_t __rev1_438;  __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \
+  float32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
+  float32x2_t __ret_438; \
+  __ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_f32(__rev2_438, __p3_438); \
+  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
+  __ret_438; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_s32(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
+  int32x2_t __s0_439 = __p0_439; \
+  int32x2_t __s1_439 = __p1_439; \
+  int32x4_t __s2_439 = __p2_439; \
+  int32x2_t __ret_439; \
+  __ret_439 = __s0_439 + __s1_439 * splat_laneq_s32(__s2_439, __p3_439); \
+  __ret_439; \
 })
 #else
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_laneq_s32(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
+  int32x2_t __s0_440 = __p0_440; \
+  int32x2_t __s1_440 = __p1_440; \
+  int32x4_t __s2_440 = __p2_440; \
+  int32x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
+  int32x2_t __rev1_440;  __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 1, 0); \
+  int32x4_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 3, 2, 1, 0); \
+  int32x2_t __ret_440; \
+  __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_s32(__rev2_440, __p3_440); \
+  __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 1, 0); \
+  __ret_440; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
+  int16x4_t __s0_441 = __p0_441; \
+  int16x4_t __s1_441 = __p1_441; \
+  int16x8_t __s2_441 = __p2_441; \
+  int16x4_t __ret_441; \
+  __ret_441 = __s0_441 + __s1_441 * splat_laneq_s16(__s2_441, __p3_441); \
+  __ret_441; \
 })
 #else
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
+  int16x4_t __s0_442 = __p0_442; \
+  int16x4_t __s1_442 = __p1_442; \
+  int16x8_t __s2_442 = __p2_442; \
+  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
+  int16x4_t __rev1_442;  __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 3, 2, 1, 0); \
+  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_442; \
+  __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_s16(__rev2_442, __p3_442); \
+  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
+  __ret_442; \
 })
 #endif
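
The substantive change across these vmla/vmls hunks is mechanical: every open-coded broadcast __builtin_shufflevector(__s2, __s2, __p3, __p3, ...) is replaced by a named splat[q]_lane[q]_<type> helper (with a __noswap_ twin on the big-endian path), and the per-expansion suffixes are renumbered (e.g. _115 to _418) so temporaries stay unique when macro expansions nest. A hedged sketch of the equivalence in plain C, where the _model functions are hypothetical stand-ins for the header's helpers rather than its actual definitions:

#include <stdint.h>
#include <stdio.h>

/* Scalar model of splatq_laneq_u32(v, lane): broadcast one lane of a
 * 4-lane vector across all 4 lanes -- exactly what the old
 * __builtin_shufflevector(v, v, p3, p3, p3, p3) spelling produced. */
static void splatq_laneq_u32_model(uint32_t out[4], const uint32_t v[4],
                                   int lane) {
  for (int i = 0; i < 4; ++i)
    out[i] = v[lane];
}

/* Scalar model of vmlaq_laneq_u32(s0, s1, s2, lane):
 * s0 + s1 * splat(s2, lane), lane by lane. */
static void vmlaq_laneq_u32_model(uint32_t ret[4], const uint32_t s0[4],
                                  const uint32_t s1[4], const uint32_t s2[4],
                                  int lane) {
  uint32_t bc[4];
  splatq_laneq_u32_model(bc, s2, lane);
  for (int i = 0; i < 4; ++i)
    ret[i] = s0[i] + s1[i] * bc[i];
}

int main(void) {
  const uint32_t s0[4] = {1, 1, 1, 1}, s1[4] = {2, 3, 4, 5};
  const uint32_t s2[4] = {10, 20, 30, 40};
  uint32_t r[4];
  vmlaq_laneq_u32_model(r, s0, s1, s2, 1);  /* lane 1 -> 20 */
  for (int i = 0; i < 4; ++i)
    printf("%u ", r[i]);                    /* prints: 41 61 81 101 */
  printf("\n");
  return 0;
}
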
 
@@ -49005,290 +52805,290 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_u32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \
+  uint64x2_t __s0_443 = __p0_443; \
+  uint32x4_t __s1_443 = __p1_443; \
+  uint32x2_t __s2_443 = __p2_443; \
+  uint64x2_t __ret_443; \
+  __ret_443 = __s0_443 + vmull_u32(vget_high_u32(__s1_443), splat_lane_u32(__s2_443, __p3_443)); \
+  __ret_443; \
 })
 #else
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_u32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \
+  uint64x2_t __s0_444 = __p0_444; \
+  uint32x4_t __s1_444 = __p1_444; \
+  uint32x2_t __s2_444 = __p2_444; \
+  uint64x2_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \
+  uint32x4_t __rev1_444;  __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 3, 2, 1, 0); \
+  uint32x2_t __rev2_444;  __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 1, 0); \
+  uint64x2_t __ret_444; \
+  __ret_444 = __rev0_444 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_444), __noswap_splat_lane_u32(__rev2_444, __p3_444)); \
+  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \
+  __ret_444; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_u16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \
+  uint32x4_t __s0_445 = __p0_445; \
+  uint16x8_t __s1_445 = __p1_445; \
+  uint16x4_t __s2_445 = __p2_445; \
+  uint32x4_t __ret_445; \
+  __ret_445 = __s0_445 + vmull_u16(vget_high_u16(__s1_445), splat_lane_u16(__s2_445, __p3_445)); \
+  __ret_445; \
 })
 #else
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_u16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \
+  uint32x4_t __s0_446 = __p0_446; \
+  uint16x8_t __s1_446 = __p1_446; \
+  uint16x4_t __s2_446 = __p2_446; \
+  uint32x4_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \
+  uint16x8_t __rev1_446;  __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_446;  __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 3, 2, 1, 0); \
+  uint32x4_t __ret_446; \
+  __ret_446 = __rev0_446 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_446), __noswap_splat_lane_u16(__rev2_446, __p3_446)); \
+  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \
+  __ret_446; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_s32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \
+  int64x2_t __s0_447 = __p0_447; \
+  int32x4_t __s1_447 = __p1_447; \
+  int32x2_t __s2_447 = __p2_447; \
+  int64x2_t __ret_447; \
+  __ret_447 = __s0_447 + vmull_s32(vget_high_s32(__s1_447), splat_lane_s32(__s2_447, __p3_447)); \
+  __ret_447; \
 })
 #else
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_s32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \
+  int64x2_t __s0_448 = __p0_448; \
+  int32x4_t __s1_448 = __p1_448; \
+  int32x2_t __s2_448 = __p2_448; \
+  int64x2_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \
+  int32x4_t __rev1_448;  __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \
+  int32x2_t __rev2_448;  __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \
+  int64x2_t __ret_448; \
+  __ret_448 = __rev0_448 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_448), __noswap_splat_lane_s32(__rev2_448, __p3_448)); \
+  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \
+  __ret_448; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_s16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \
+  int32x4_t __s0_449 = __p0_449; \
+  int16x8_t __s1_449 = __p1_449; \
+  int16x4_t __s2_449 = __p2_449; \
+  int32x4_t __ret_449; \
+  __ret_449 = __s0_449 + vmull_s16(vget_high_s16(__s1_449), splat_lane_s16(__s2_449, __p3_449)); \
+  __ret_449; \
 })
 #else
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_s16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \
+  int32x4_t __s0_450 = __p0_450; \
+  int16x8_t __s1_450 = __p1_450; \
+  int16x4_t __s2_450 = __p2_450; \
+  int32x4_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \
+  int16x8_t __rev1_450;  __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_450;  __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \
+  int32x4_t __ret_450; \
+  __ret_450 = __rev0_450 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_450), __noswap_splat_lane_s16(__rev2_450, __p3_450)); \
+  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \
+  __ret_450; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_u32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \
+  uint64x2_t __s0_451 = __p0_451; \
+  uint32x4_t __s1_451 = __p1_451; \
+  uint32x4_t __s2_451 = __p2_451; \
+  uint64x2_t __ret_451; \
+  __ret_451 = __s0_451 + vmull_u32(vget_high_u32(__s1_451), splat_laneq_u32(__s2_451, __p3_451)); \
+  __ret_451; \
 })
 #else
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_u32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \
+  uint64x2_t __s0_452 = __p0_452; \
+  uint32x4_t __s1_452 = __p1_452; \
+  uint32x4_t __s2_452 = __p2_452; \
+  uint64x2_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \
+  uint32x4_t __rev1_452;  __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \
+  uint32x4_t __rev2_452;  __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 3, 2, 1, 0); \
+  uint64x2_t __ret_452; \
+  __ret_452 = __rev0_452 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_452), __noswap_splat_laneq_u32(__rev2_452, __p3_452)); \
+  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \
+  __ret_452; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_u16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \
+  uint32x4_t __s0_453 = __p0_453; \
+  uint16x8_t __s1_453 = __p1_453; \
+  uint16x8_t __s2_453 = __p2_453; \
+  uint32x4_t __ret_453; \
+  __ret_453 = __s0_453 + vmull_u16(vget_high_u16(__s1_453), splat_laneq_u16(__s2_453, __p3_453)); \
+  __ret_453; \
 })
 #else
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_u16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \
+  uint32x4_t __s0_454 = __p0_454; \
+  uint16x8_t __s1_454 = __p1_454; \
+  uint16x8_t __s2_454 = __p2_454; \
+  uint32x4_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \
+  uint16x8_t __rev1_454;  __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_454;  __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_454; \
+  __ret_454 = __rev0_454 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_454), __noswap_splat_laneq_u16(__rev2_454, __p3_454)); \
+  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \
+  __ret_454; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_s32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \
+  int64x2_t __s0_455 = __p0_455; \
+  int32x4_t __s1_455 = __p1_455; \
+  int32x4_t __s2_455 = __p2_455; \
+  int64x2_t __ret_455; \
+  __ret_455 = __s0_455 + vmull_s32(vget_high_s32(__s1_455), splat_laneq_s32(__s2_455, __p3_455)); \
+  __ret_455; \
 })
 #else
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_s32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \
+  int64x2_t __s0_456 = __p0_456; \
+  int32x4_t __s1_456 = __p1_456; \
+  int32x4_t __s2_456 = __p2_456; \
+  int64x2_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \
+  int32x4_t __rev1_456;  __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \
+  int32x4_t __rev2_456;  __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \
+  int64x2_t __ret_456; \
+  __ret_456 = __rev0_456 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_456), __noswap_splat_laneq_s32(__rev2_456, __p3_456)); \
+  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \
+  __ret_456; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_s16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \
+  int32x4_t __s0_457 = __p0_457; \
+  int16x8_t __s1_457 = __p1_457; \
+  int16x8_t __s2_457 = __p2_457; \
+  int32x4_t __ret_457; \
+  __ret_457 = __s0_457 + vmull_s16(vget_high_s16(__s1_457), splat_laneq_s16(__s2_457, __p3_457)); \
+  __ret_457; \
 })
 #else
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_s16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \
+  int32x4_t __s0_458 = __p0_458; \
+  int16x8_t __s1_458 = __p1_458; \
+  int16x8_t __s2_458 = __p2_458; \
+  int32x4_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \
+  int16x8_t __rev1_458;  __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_458;  __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_458; \
+  __ret_458 = __rev0_458 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_458), __noswap_splat_laneq_s16(__rev2_458, __p3_458)); \
+  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \
+  __ret_458; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_u32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \
+  uint64x2_t __s0_459 = __p0_459; \
+  uint32x2_t __s1_459 = __p1_459; \
+  uint32x4_t __s2_459 = __p2_459; \
+  uint64x2_t __ret_459; \
+  __ret_459 = __s0_459 + vmull_u32(__s1_459, splat_laneq_u32(__s2_459, __p3_459)); \
+  __ret_459; \
 })
 #else
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_laneq_u32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \
+  uint64x2_t __s0_460 = __p0_460; \
+  uint32x2_t __s1_460 = __p1_460; \
+  uint32x4_t __s2_460 = __p2_460; \
+  uint64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
+  uint32x2_t __rev1_460;  __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 1, 0); \
+  uint32x4_t __rev2_460;  __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \
+  uint64x2_t __ret_460; \
+  __ret_460 = __rev0_460 + __noswap_vmull_u32(__rev1_460, __noswap_splat_laneq_u32(__rev2_460, __p3_460)); \
+  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
+  __ret_460; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_u16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \
+  uint32x4_t __s0_461 = __p0_461; \
+  uint16x4_t __s1_461 = __p1_461; \
+  uint16x8_t __s2_461 = __p2_461; \
+  uint32x4_t __ret_461; \
+  __ret_461 = __s0_461 + vmull_u16(__s1_461, splat_laneq_u16(__s2_461, __p3_461)); \
+  __ret_461; \
 })
 #else
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_laneq_u16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \
+  uint32x4_t __s0_462 = __p0_462; \
+  uint16x4_t __s1_462 = __p1_462; \
+  uint16x8_t __s2_462 = __p2_462; \
+  uint32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
+  uint16x4_t __rev1_462;  __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 3, 2, 1, 0); \
+  uint16x8_t __rev2_462;  __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_462; \
+  __ret_462 = __rev0_462 + __noswap_vmull_u16(__rev1_462, __noswap_splat_laneq_u16(__rev2_462, __p3_462)); \
+  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
+  __ret_462; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_s32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \
+  int64x2_t __s0_463 = __p0_463; \
+  int32x2_t __s1_463 = __p1_463; \
+  int32x4_t __s2_463 = __p2_463; \
+  int64x2_t __ret_463; \
+  __ret_463 = __s0_463 + vmull_s32(__s1_463, splat_laneq_s32(__s2_463, __p3_463)); \
+  __ret_463; \
 })
 #else
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_laneq_s32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \
+  int64x2_t __s0_464 = __p0_464; \
+  int32x2_t __s1_464 = __p1_464; \
+  int32x4_t __s2_464 = __p2_464; \
+  int64x2_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \
+  int32x2_t __rev1_464;  __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \
+  int32x4_t __rev2_464;  __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \
+  int64x2_t __ret_464; \
+  __ret_464 = __rev0_464 + __noswap_vmull_s32(__rev1_464, __noswap_splat_laneq_s32(__rev2_464, __p3_464)); \
+  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \
+  __ret_464; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_s16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \
+  int32x4_t __s0_465 = __p0_465; \
+  int16x4_t __s1_465 = __p1_465; \
+  int16x8_t __s2_465 = __p2_465; \
+  int32x4_t __ret_465; \
+  __ret_465 = __s0_465 + vmull_s16(__s1_465, splat_laneq_s16(__s2_465, __p3_465)); \
+  __ret_465; \
 })
 #else
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_laneq_s16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \
+  int32x4_t __s0_466 = __p0_466; \
+  int16x4_t __s1_466 = __p1_466; \
+  int16x8_t __s2_466 = __p2_466; \
+  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
+  int16x4_t __rev1_466;  __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \
+  int16x8_t __rev2_466;  __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_466; \
+  __ret_466 = __rev0_466 + __noswap_vmull_s16(__rev1_466, __noswap_splat_laneq_s16(__rev2_466, __p3_466)); \
+  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
+  __ret_466; \
 })
 #endif
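
The vmlal hunks follow the same splat-helper rewrite, but the underlying operation widens: vmlal_laneq_u32 accumulates 64-bit products of 32-bit lanes, and the _high_ forms first take vget_high of the 128-bit source before the widening multiply. A scalar sketch under that reading (again a hypothetical model, not the header's code; the max-u32 input exercises the widening):

#include <stdint.h>
#include <stdio.h>

/* Scalar model of vmlal_laneq_u32(s0, s1, s2, lane): widen each
 * uint32 product to uint64 before accumulating, i.e.
 * ret[i] = s0[i] + (uint64_t)s1[i] * s2[lane]. */
static void vmlal_laneq_u32_model(uint64_t ret[2], const uint64_t s0[2],
                                  const uint32_t s1[2], const uint32_t s2[4],
                                  int lane) {
  for (int i = 0; i < 2; ++i)
    ret[i] = s0[i] + (uint64_t)s1[i] * s2[lane];
}

int main(void) {
  const uint64_t s0[2] = {1, 2};
  const uint32_t s1[2] = {0xFFFFFFFFu, 3};  /* max u32: product needs 64 bits */
  const uint32_t s2[4] = {7, 11, 13, 17};
  uint64_t r[2];
  vmlal_laneq_u32_model(r, s0, s1, s2, 3);  /* lane 3 -> 17 */
  printf("%llu %llu\n", (unsigned long long)r[0],
         (unsigned long long)r[1]);         /* prints: 73014444016 53 */
  return 0;
}
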
 
@@ -49316,242 +53116,242 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_u32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \
+  uint32x4_t __s0_467 = __p0_467; \
+  uint32x4_t __s1_467 = __p1_467; \
+  uint32x4_t __s2_467 = __p2_467; \
+  uint32x4_t __ret_467; \
+  __ret_467 = __s0_467 - __s1_467 * splatq_laneq_u32(__s2_467, __p3_467); \
+  __ret_467; \
 })
 #else
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_u32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \
+  uint32x4_t __s0_468 = __p0_468; \
+  uint32x4_t __s1_468 = __p1_468; \
+  uint32x4_t __s2_468 = __p2_468; \
+  uint32x4_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \
+  uint32x4_t __rev1_468;  __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 3, 2, 1, 0); \
+  uint32x4_t __rev2_468;  __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \
+  uint32x4_t __ret_468; \
+  __ret_468 = __rev0_468 - __rev1_468 * __noswap_splatq_laneq_u32(__rev2_468, __p3_468); \
+  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \
+  __ret_468; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_u16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \
+  uint16x8_t __s0_469 = __p0_469; \
+  uint16x8_t __s1_469 = __p1_469; \
+  uint16x8_t __s2_469 = __p2_469; \
+  uint16x8_t __ret_469; \
+  __ret_469 = __s0_469 - __s1_469 * splatq_laneq_u16(__s2_469, __p3_469); \
+  __ret_469; \
 })
 #else
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_u16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \
+  uint16x8_t __s0_470 = __p0_470; \
+  uint16x8_t __s1_470 = __p1_470; \
+  uint16x8_t __s2_470 = __p2_470; \
+  uint16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_470;  __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_470;  __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_470; \
+  __ret_470 = __rev0_470 - __rev1_470 * __noswap_splatq_laneq_u16(__rev2_470, __p3_470); \
+  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_470; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_f32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \
+  float32x4_t __s0_471 = __p0_471; \
+  float32x4_t __s1_471 = __p1_471; \
+  float32x4_t __s2_471 = __p2_471; \
+  float32x4_t __ret_471; \
+  __ret_471 = __s0_471 - __s1_471 * splatq_laneq_f32(__s2_471, __p3_471); \
+  __ret_471; \
 })
 #else
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_f32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \
+  float32x4_t __s0_472 = __p0_472; \
+  float32x4_t __s1_472 = __p1_472; \
+  float32x4_t __s2_472 = __p2_472; \
+  float32x4_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \
+  float32x4_t __rev1_472;  __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \
+  float32x4_t __rev2_472;  __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \
+  float32x4_t __ret_472; \
+  __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_f32(__rev2_472, __p3_472); \
+  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \
+  __ret_472; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_s32(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \
+  int32x4_t __s0_473 = __p0_473; \
+  int32x4_t __s1_473 = __p1_473; \
+  int32x4_t __s2_473 = __p2_473; \
+  int32x4_t __ret_473; \
+  __ret_473 = __s0_473 - __s1_473 * splatq_laneq_s32(__s2_473, __p3_473); \
+  __ret_473; \
 })
 #else
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_s32(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \
+  int32x4_t __s0_474 = __p0_474; \
+  int32x4_t __s1_474 = __p1_474; \
+  int32x4_t __s2_474 = __p2_474; \
+  int32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
+  int32x4_t __rev1_474;  __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 3, 2, 1, 0); \
+  int32x4_t __rev2_474;  __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 3, 2, 1, 0); \
+  int32x4_t __ret_474; \
+  __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_s32(__rev2_474, __p3_474); \
+  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 3, 2, 1, 0); \
+  __ret_474; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_s16(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \
+  int16x8_t __s0_475 = __p0_475; \
+  int16x8_t __s1_475 = __p1_475; \
+  int16x8_t __s2_475 = __p2_475; \
+  int16x8_t __ret_475; \
+  __ret_475 = __s0_475 - __s1_475 * splatq_laneq_s16(__s2_475, __p3_475); \
+  __ret_475; \
 })
 #else
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_s16(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \
+  int16x8_t __s0_476 = __p0_476; \
+  int16x8_t __s1_476 = __p1_476; \
+  int16x8_t __s2_476 = __p2_476; \
+  int16x8_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_476;  __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_476;  __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_476; \
+  __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_s16(__rev2_476, __p3_476); \
+  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_476; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_u32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \
+  uint32x2_t __s0_477 = __p0_477; \
+  uint32x2_t __s1_477 = __p1_477; \
+  uint32x4_t __s2_477 = __p2_477; \
+  uint32x2_t __ret_477; \
+  __ret_477 = __s0_477 - __s1_477 * splat_laneq_u32(__s2_477, __p3_477); \
+  __ret_477; \
 })
 #else
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_laneq_u32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \
+  uint32x2_t __s0_478 = __p0_478; \
+  uint32x2_t __s1_478 = __p1_478; \
+  uint32x4_t __s2_478 = __p2_478; \
+  uint32x2_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 1, 0); \
+  uint32x2_t __rev1_478;  __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 1, 0); \
+  uint32x4_t __rev2_478;  __rev2_478 = __builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \
+  uint32x2_t __ret_478; \
+  __ret_478 = __rev0_478 - __rev1_478 * __noswap_splat_laneq_u32(__rev2_478, __p3_478); \
+  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 1, 0); \
+  __ret_478; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_u16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \
+  uint16x4_t __s0_479 = __p0_479; \
+  uint16x4_t __s1_479 = __p1_479; \
+  uint16x8_t __s2_479 = __p2_479; \
+  uint16x4_t __ret_479; \
+  __ret_479 = __s0_479 - __s1_479 * splat_laneq_u16(__s2_479, __p3_479); \
+  __ret_479; \
 })
 #else
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_laneq_u16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \
+  uint16x4_t __s0_480 = __p0_480; \
+  uint16x4_t __s1_480 = __p1_480; \
+  uint16x8_t __s2_480 = __p2_480; \
+  uint16x4_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 3, 2, 1, 0); \
+  uint16x4_t __rev1_480;  __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 3, 2, 1, 0); \
+  uint16x8_t __rev2_480;  __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_480; \
+  __ret_480 = __rev0_480 - __rev1_480 * __noswap_splat_laneq_u16(__rev2_480, __p3_480); \
+  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 3, 2, 1, 0); \
+  __ret_480; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_f32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \
+  float32x2_t __s0_481 = __p0_481; \
+  float32x2_t __s1_481 = __p1_481; \
+  float32x4_t __s2_481 = __p2_481; \
+  float32x2_t __ret_481; \
+  __ret_481 = __s0_481 - __s1_481 * splat_laneq_f32(__s2_481, __p3_481); \
+  __ret_481; \
 })
 #else
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_laneq_f32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \
+  float32x2_t __s0_482 = __p0_482; \
+  float32x2_t __s1_482 = __p1_482; \
+  float32x4_t __s2_482 = __p2_482; \
+  float32x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
+  float32x2_t __rev1_482;  __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 0); \
+  float32x4_t __rev2_482;  __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \
+  float32x2_t __ret_482; \
+  __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_f32(__rev2_482, __p3_482); \
+  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \
+  __ret_482; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_s32(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \
+  int32x2_t __s0_483 = __p0_483; \
+  int32x2_t __s1_483 = __p1_483; \
+  int32x4_t __s2_483 = __p2_483; \
+  int32x2_t __ret_483; \
+  __ret_483 = __s0_483 - __s1_483 * splat_laneq_s32(__s2_483, __p3_483); \
+  __ret_483; \
 })
 #else
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_laneq_s32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \
+  int32x2_t __s0_484 = __p0_484; \
+  int32x2_t __s1_484 = __p1_484; \
+  int32x4_t __s2_484 = __p2_484; \
+  int32x2_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \
+  int32x2_t __rev1_484;  __rev1_484 = __builtin_shufflevector(__s1_484, __s1_484, 1, 0); \
+  int32x4_t __rev2_484;  __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 3, 2, 1, 0); \
+  int32x2_t __ret_484; \
+  __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_s32(__rev2_484, __p3_484); \
+  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
+  __ret_484; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_s16(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \
+  int16x4_t __s0_485 = __p0_485; \
+  int16x4_t __s1_485 = __p1_485; \
+  int16x8_t __s2_485 = __p2_485; \
+  int16x4_t __ret_485; \
+  __ret_485 = __s0_485 - __s1_485 * splat_laneq_s16(__s2_485, __p3_485); \
+  __ret_485; \
 })
 #else
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_laneq_s16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \
+  int16x4_t __s0_486 = __p0_486; \
+  int16x4_t __s1_486 = __p1_486; \
+  int16x8_t __s2_486 = __p2_486; \
+  int16x4_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 3, 2, 1, 0); \
+  int16x4_t __rev1_486;  __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 3, 2, 1, 0); \
+  int16x8_t __rev2_486;  __rev2_486 = __builtin_shufflevector(__s2_486, __s2_486, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_486; \
+  __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_s16(__rev2_486, __p3_486); \
+  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
+  __ret_486; \
 })
 #endif
 
@@ -49573,290 +53373,290 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_u32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \
+  uint64x2_t __s0_487 = __p0_487; \
+  uint32x4_t __s1_487 = __p1_487; \
+  uint32x2_t __s2_487 = __p2_487; \
+  uint64x2_t __ret_487; \
+  __ret_487 = __s0_487 - vmull_u32(vget_high_u32(__s1_487), splat_lane_u32(__s2_487, __p3_487)); \
+  __ret_487; \
 })
 #else
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_u32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \
+  uint64x2_t __s0_488 = __p0_488; \
+  uint32x4_t __s1_488 = __p1_488; \
+  uint32x2_t __s2_488 = __p2_488; \
+  uint64x2_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \
+  uint32x4_t __rev1_488;  __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 3, 2, 1, 0); \
+  uint32x2_t __rev2_488;  __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 1, 0); \
+  uint64x2_t __ret_488; \
+  __ret_488 = __rev0_488 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_488), __noswap_splat_lane_u32(__rev2_488, __p3_488)); \
+  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
+  __ret_488; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_u16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \
+  uint32x4_t __s0_489 = __p0_489; \
+  uint16x8_t __s1_489 = __p1_489; \
+  uint16x4_t __s2_489 = __p2_489; \
+  uint32x4_t __ret_489; \
+  __ret_489 = __s0_489 - vmull_u16(vget_high_u16(__s1_489), splat_lane_u16(__s2_489, __p3_489)); \
+  __ret_489; \
 })
 #else
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_u16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \
+  uint32x4_t __s0_490 = __p0_490; \
+  uint16x8_t __s1_490 = __p1_490; \
+  uint16x4_t __s2_490 = __p2_490; \
+  uint32x4_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \
+  uint16x8_t __rev1_490;  __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_490;  __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 3, 2, 1, 0); \
+  uint32x4_t __ret_490; \
+  __ret_490 = __rev0_490 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_490), __noswap_splat_lane_u16(__rev2_490, __p3_490)); \
+  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \
+  __ret_490; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_s32(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \
+  int64x2_t __s0_491 = __p0_491; \
+  int32x4_t __s1_491 = __p1_491; \
+  int32x2_t __s2_491 = __p2_491; \
+  int64x2_t __ret_491; \
+  __ret_491 = __s0_491 - vmull_s32(vget_high_s32(__s1_491), splat_lane_s32(__s2_491, __p3_491)); \
+  __ret_491; \
 })
 #else
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_s32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \
+  int64x2_t __s0_492 = __p0_492; \
+  int32x4_t __s1_492 = __p1_492; \
+  int32x2_t __s2_492 = __p2_492; \
+  int64x2_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \
+  int32x4_t __rev1_492;  __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \
+  int32x2_t __rev2_492;  __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \
+  int64x2_t __ret_492; \
+  __ret_492 = __rev0_492 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_492), __noswap_splat_lane_s32(__rev2_492, __p3_492)); \
+  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \
+  __ret_492; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_s16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
+  int32x4_t __s0_493 = __p0_493; \
+  int16x8_t __s1_493 = __p1_493; \
+  int16x4_t __s2_493 = __p2_493; \
+  int32x4_t __ret_493; \
+  __ret_493 = __s0_493 - vmull_s16(vget_high_s16(__s1_493), splat_lane_s16(__s2_493, __p3_493)); \
+  __ret_493; \
 })
 #else
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_s16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
+  int32x4_t __s0_494 = __p0_494; \
+  int16x8_t __s1_494 = __p1_494; \
+  int16x4_t __s2_494 = __p2_494; \
+  int32x4_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \
+  int16x8_t __rev1_494;  __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_494;  __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \
+  int32x4_t __ret_494; \
+  __ret_494 = __rev0_494 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_494), __noswap_splat_lane_s16(__rev2_494, __p3_494)); \
+  __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \
+  __ret_494; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_u32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
+  uint64x2_t __s0_495 = __p0_495; \
+  uint32x4_t __s1_495 = __p1_495; \
+  uint32x4_t __s2_495 = __p2_495; \
+  uint64x2_t __ret_495; \
+  __ret_495 = __s0_495 - vmull_u32(vget_high_u32(__s1_495), splat_laneq_u32(__s2_495, __p3_495)); \
+  __ret_495; \
 })
 #else
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_u32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
+  uint64x2_t __s0_496 = __p0_496; \
+  uint32x4_t __s1_496 = __p1_496; \
+  uint32x4_t __s2_496 = __p2_496; \
+  uint64x2_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \
+  uint32x4_t __rev1_496;  __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \
+  uint32x4_t __rev2_496;  __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 3, 2, 1, 0); \
+  uint64x2_t __ret_496; \
+  __ret_496 = __rev0_496 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_496), __noswap_splat_laneq_u32(__rev2_496, __p3_496)); \
+  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \
+  __ret_496; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_u16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
+  uint32x4_t __s0_497 = __p0_497; \
+  uint16x8_t __s1_497 = __p1_497; \
+  uint16x8_t __s2_497 = __p2_497; \
+  uint32x4_t __ret_497; \
+  __ret_497 = __s0_497 - vmull_u16(vget_high_u16(__s1_497), splat_laneq_u16(__s2_497, __p3_497)); \
+  __ret_497; \
 })
 #else
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_u16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
+  uint32x4_t __s0_498 = __p0_498; \
+  uint16x8_t __s1_498 = __p1_498; \
+  uint16x8_t __s2_498 = __p2_498; \
+  uint32x4_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \
+  uint16x8_t __rev1_498;  __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_498;  __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_498; \
+  __ret_498 = __rev0_498 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_498), __noswap_splat_laneq_u16(__rev2_498, __p3_498)); \
+  __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \
+  __ret_498; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_s32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
+  int64x2_t __s0_499 = __p0_499; \
+  int32x4_t __s1_499 = __p1_499; \
+  int32x4_t __s2_499 = __p2_499; \
+  int64x2_t __ret_499; \
+  __ret_499 = __s0_499 - vmull_s32(vget_high_s32(__s1_499), splat_laneq_s32(__s2_499, __p3_499)); \
+  __ret_499; \
 })
 #else
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_s32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
+  int64x2_t __s0_500 = __p0_500; \
+  int32x4_t __s1_500 = __p1_500; \
+  int32x4_t __s2_500 = __p2_500; \
+  int64x2_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \
+  int32x4_t __rev1_500;  __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \
+  int32x4_t __rev2_500;  __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \
+  int64x2_t __ret_500; \
+  __ret_500 = __rev0_500 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_500), __noswap_splat_laneq_s32(__rev2_500, __p3_500)); \
+  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+  __ret_500; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_s16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
+  int32x4_t __s0_501 = __p0_501; \
+  int16x8_t __s1_501 = __p1_501; \
+  int16x8_t __s2_501 = __p2_501; \
+  int32x4_t __ret_501; \
+  __ret_501 = __s0_501 - vmull_s16(vget_high_s16(__s1_501), splat_laneq_s16(__s2_501, __p3_501)); \
+  __ret_501; \
 })
 #else
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_s16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
+  int32x4_t __s0_502 = __p0_502; \
+  int16x8_t __s1_502 = __p1_502; \
+  int16x8_t __s2_502 = __p2_502; \
+  int32x4_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \
+  int16x8_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_502; \
+  __ret_502 = __rev0_502 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_502), __noswap_splat_laneq_s16(__rev2_502, __p3_502)); \
+  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \
+  __ret_502; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_u32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
+  uint64x2_t __s0_503 = __p0_503; \
+  uint32x2_t __s1_503 = __p1_503; \
+  uint32x4_t __s2_503 = __p2_503; \
+  uint64x2_t __ret_503; \
+  __ret_503 = __s0_503 - vmull_u32(__s1_503, splat_laneq_u32(__s2_503, __p3_503)); \
+  __ret_503; \
 })
 #else
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_u32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
+  uint64x2_t __s0_504 = __p0_504; \
+  uint32x2_t __s1_504 = __p1_504; \
+  uint32x4_t __s2_504 = __p2_504; \
+  uint64x2_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \
+  uint32x2_t __rev1_504;  __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 1, 0); \
+  uint32x4_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \
+  uint64x2_t __ret_504; \
+  __ret_504 = __rev0_504 - __noswap_vmull_u32(__rev1_504, __noswap_splat_laneq_u32(__rev2_504, __p3_504)); \
+  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \
+  __ret_504; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_u16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
+  uint32x4_t __s0_505 = __p0_505; \
+  uint16x4_t __s1_505 = __p1_505; \
+  uint16x8_t __s2_505 = __p2_505; \
+  uint32x4_t __ret_505; \
+  __ret_505 = __s0_505 - vmull_u16(__s1_505, splat_laneq_u16(__s2_505, __p3_505)); \
+  __ret_505; \
 })
 #else
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_u16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
+  uint32x4_t __s0_506 = __p0_506; \
+  uint16x4_t __s1_506 = __p1_506; \
+  uint16x8_t __s2_506 = __p2_506; \
+  uint32x4_t __rev0_506;  __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \
+  uint16x4_t __rev1_506;  __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 3, 2, 1, 0); \
+  uint16x8_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_506; \
+  __ret_506 = __rev0_506 - __noswap_vmull_u16(__rev1_506, __noswap_splat_laneq_u16(__rev2_506, __p3_506)); \
+  __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \
+  __ret_506; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_s32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
+  int64x2_t __s0_507 = __p0_507; \
+  int32x2_t __s1_507 = __p1_507; \
+  int32x4_t __s2_507 = __p2_507; \
+  int64x2_t __ret_507; \
+  __ret_507 = __s0_507 - vmull_s32(__s1_507, splat_laneq_s32(__s2_507, __p3_507)); \
+  __ret_507; \
 })
 #else
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_s32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
+  int64x2_t __s0_508 = __p0_508; \
+  int32x2_t __s1_508 = __p1_508; \
+  int32x4_t __s2_508 = __p2_508; \
+  int64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
+  int32x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
+  int32x4_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \
+  int64x2_t __ret_508; \
+  __ret_508 = __rev0_508 - __noswap_vmull_s32(__rev1_508, __noswap_splat_laneq_s32(__rev2_508, __p3_508)); \
+  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
+  __ret_508; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_s16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+  int32x4_t __s0_509 = __p0_509; \
+  int16x4_t __s1_509 = __p1_509; \
+  int16x8_t __s2_509 = __p2_509; \
+  int32x4_t __ret_509; \
+  __ret_509 = __s0_509 - vmull_s16(__s1_509, splat_laneq_s16(__s2_509, __p3_509)); \
+  __ret_509; \
 })
 #else
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_s16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+  int32x4_t __s0_510 = __p0_510; \
+  int16x4_t __s1_510 = __p1_510; \
+  int16x8_t __s2_510 = __p2_510; \
+  int32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
+  int16x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
+  int16x8_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_510; \
+  __ret_510 = __rev0_510 - __noswap_vmull_s16(__rev1_510, __noswap_splat_laneq_s16(__rev2_510, __p3_510)); \
+  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
+  __ret_510; \
 })
 #endif
 
@@ -49901,146 +53701,146 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_120) {
-  uint16x8_t __ret_120;
-  uint8x8_t __a1_120 = vget_high_u8(__p0_120);
-  __ret_120 = (uint16x8_t)(vshll_n_u8(__a1_120, 0));
-  return __ret_120;
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_511) {
+  uint16x8_t __ret_511;
+  uint8x8_t __a1_511 = vget_high_u8(__p0_511);
+  __ret_511 = (uint16x8_t)(vshll_n_u8(__a1_511, 0));
+  return __ret_511;
 }
 #else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_121) {
-  uint8x16_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__p0_121, __p0_121, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_121;
-  uint8x8_t __a1_121 = __noswap_vget_high_u8(__rev0_121);
-  __ret_121 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_121, 0));
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_121;
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_512) {
+  uint8x16_t __rev0_512;  __rev0_512 = __builtin_shufflevector(__p0_512, __p0_512, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret_512;
+  uint8x8_t __a1_512 = __noswap_vget_high_u8(__rev0_512);
+  __ret_512 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_512, 0));
+  __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_512;
 }
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_122) {
-  uint16x8_t __ret_122;
-  uint8x8_t __a1_122 = __noswap_vget_high_u8(__p0_122);
-  __ret_122 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_122, 0));
-  return __ret_122;
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_513) {
+  uint16x8_t __ret_513;
+  uint8x8_t __a1_513 = __noswap_vget_high_u8(__p0_513);
+  __ret_513 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_513, 0));
+  return __ret_513;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_123) {
-  uint64x2_t __ret_123;
-  uint32x2_t __a1_123 = vget_high_u32(__p0_123);
-  __ret_123 = (uint64x2_t)(vshll_n_u32(__a1_123, 0));
-  return __ret_123;
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_514) {
+  uint64x2_t __ret_514;
+  uint32x2_t __a1_514 = vget_high_u32(__p0_514);
+  __ret_514 = (uint64x2_t)(vshll_n_u32(__a1_514, 0));
+  return __ret_514;
 }
 #else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_124) {
-  uint32x4_t __rev0_124;  __rev0_124 = __builtin_shufflevector(__p0_124, __p0_124, 3, 2, 1, 0);
-  uint64x2_t __ret_124;
-  uint32x2_t __a1_124 = __noswap_vget_high_u32(__rev0_124);
-  __ret_124 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_124, 0));
-  __ret_124 = __builtin_shufflevector(__ret_124, __ret_124, 1, 0);
-  return __ret_124;
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_515) {
+  uint32x4_t __rev0_515;  __rev0_515 = __builtin_shufflevector(__p0_515, __p0_515, 3, 2, 1, 0);
+  uint64x2_t __ret_515;
+  uint32x2_t __a1_515 = __noswap_vget_high_u32(__rev0_515);
+  __ret_515 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_515, 0));
+  __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 1, 0);
+  return __ret_515;
 }
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_125) {
-  uint64x2_t __ret_125;
-  uint32x2_t __a1_125 = __noswap_vget_high_u32(__p0_125);
-  __ret_125 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_125, 0));
-  return __ret_125;
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_516) {
+  uint64x2_t __ret_516;
+  uint32x2_t __a1_516 = __noswap_vget_high_u32(__p0_516);
+  __ret_516 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_516, 0));
+  return __ret_516;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_126) {
-  uint32x4_t __ret_126;
-  uint16x4_t __a1_126 = vget_high_u16(__p0_126);
-  __ret_126 = (uint32x4_t)(vshll_n_u16(__a1_126, 0));
-  return __ret_126;
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_517) {
+  uint32x4_t __ret_517;
+  uint16x4_t __a1_517 = vget_high_u16(__p0_517);
+  __ret_517 = (uint32x4_t)(vshll_n_u16(__a1_517, 0));
+  return __ret_517;
 }
 #else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_127) {
-  uint16x8_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__p0_127, __p0_127, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_127;
-  uint16x4_t __a1_127 = __noswap_vget_high_u16(__rev0_127);
-  __ret_127 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_127, 0));
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0);
-  return __ret_127;
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_518) {
+  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__p0_518, __p0_518, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret_518;
+  uint16x4_t __a1_518 = __noswap_vget_high_u16(__rev0_518);
+  __ret_518 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_518, 0));
+  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 3, 2, 1, 0);
+  return __ret_518;
 }
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_128) {
-  uint32x4_t __ret_128;
-  uint16x4_t __a1_128 = __noswap_vget_high_u16(__p0_128);
-  __ret_128 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_128, 0));
-  return __ret_128;
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_519) {
+  uint32x4_t __ret_519;
+  uint16x4_t __a1_519 = __noswap_vget_high_u16(__p0_519);
+  __ret_519 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_519, 0));
+  return __ret_519;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_129) {
-  int16x8_t __ret_129;
-  int8x8_t __a1_129 = vget_high_s8(__p0_129);
-  __ret_129 = (int16x8_t)(vshll_n_s8(__a1_129, 0));
-  return __ret_129;
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_520) {
+  int16x8_t __ret_520;
+  int8x8_t __a1_520 = vget_high_s8(__p0_520);
+  __ret_520 = (int16x8_t)(vshll_n_s8(__a1_520, 0));
+  return __ret_520;
 }
 #else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_130) {
-  int8x16_t __rev0_130;  __rev0_130 = __builtin_shufflevector(__p0_130, __p0_130, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_130;
-  int8x8_t __a1_130 = __noswap_vget_high_s8(__rev0_130);
-  __ret_130 = (int16x8_t)(__noswap_vshll_n_s8(__a1_130, 0));
-  __ret_130 = __builtin_shufflevector(__ret_130, __ret_130, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_130;
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_521) {
+  int8x16_t __rev0_521;  __rev0_521 = __builtin_shufflevector(__p0_521, __p0_521, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret_521;
+  int8x8_t __a1_521 = __noswap_vget_high_s8(__rev0_521);
+  __ret_521 = (int16x8_t)(__noswap_vshll_n_s8(__a1_521, 0));
+  __ret_521 = __builtin_shufflevector(__ret_521, __ret_521, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_521;
 }
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_131) {
-  int16x8_t __ret_131;
-  int8x8_t __a1_131 = __noswap_vget_high_s8(__p0_131);
-  __ret_131 = (int16x8_t)(__noswap_vshll_n_s8(__a1_131, 0));
-  return __ret_131;
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_522) {
+  int16x8_t __ret_522;
+  int8x8_t __a1_522 = __noswap_vget_high_s8(__p0_522);
+  __ret_522 = (int16x8_t)(__noswap_vshll_n_s8(__a1_522, 0));
+  return __ret_522;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_132) {
-  int64x2_t __ret_132;
-  int32x2_t __a1_132 = vget_high_s32(__p0_132);
-  __ret_132 = (int64x2_t)(vshll_n_s32(__a1_132, 0));
-  return __ret_132;
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_523) {
+  int64x2_t __ret_523;
+  int32x2_t __a1_523 = vget_high_s32(__p0_523);
+  __ret_523 = (int64x2_t)(vshll_n_s32(__a1_523, 0));
+  return __ret_523;
 }
 #else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_133) {
-  int32x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__p0_133, __p0_133, 3, 2, 1, 0);
-  int64x2_t __ret_133;
-  int32x2_t __a1_133 = __noswap_vget_high_s32(__rev0_133);
-  __ret_133 = (int64x2_t)(__noswap_vshll_n_s32(__a1_133, 0));
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0);
-  return __ret_133;
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_524) {
+  int32x4_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__p0_524, __p0_524, 3, 2, 1, 0);
+  int64x2_t __ret_524;
+  int32x2_t __a1_524 = __noswap_vget_high_s32(__rev0_524);
+  __ret_524 = (int64x2_t)(__noswap_vshll_n_s32(__a1_524, 0));
+  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0);
+  return __ret_524;
 }
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_134) {
-  int64x2_t __ret_134;
-  int32x2_t __a1_134 = __noswap_vget_high_s32(__p0_134);
-  __ret_134 = (int64x2_t)(__noswap_vshll_n_s32(__a1_134, 0));
-  return __ret_134;
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_525) {
+  int64x2_t __ret_525;
+  int32x2_t __a1_525 = __noswap_vget_high_s32(__p0_525);
+  __ret_525 = (int64x2_t)(__noswap_vshll_n_s32(__a1_525, 0));
+  return __ret_525;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_135) {
-  int32x4_t __ret_135;
-  int16x4_t __a1_135 = vget_high_s16(__p0_135);
-  __ret_135 = (int32x4_t)(vshll_n_s16(__a1_135, 0));
-  return __ret_135;
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_526) {
+  int32x4_t __ret_526;
+  int16x4_t __a1_526 = vget_high_s16(__p0_526);
+  __ret_526 = (int32x4_t)(vshll_n_s16(__a1_526, 0));
+  return __ret_526;
 }
 #else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_136) {
-  int16x8_t __rev0_136;  __rev0_136 = __builtin_shufflevector(__p0_136, __p0_136, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_136;
-  int16x4_t __a1_136 = __noswap_vget_high_s16(__rev0_136);
-  __ret_136 = (int32x4_t)(__noswap_vshll_n_s16(__a1_136, 0));
-  __ret_136 = __builtin_shufflevector(__ret_136, __ret_136, 3, 2, 1, 0);
-  return __ret_136;
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_527) {
+  int16x8_t __rev0_527;  __rev0_527 = __builtin_shufflevector(__p0_527, __p0_527, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret_527;
+  int16x4_t __a1_527 = __noswap_vget_high_s16(__rev0_527);
+  __ret_527 = (int32x4_t)(__noswap_vshll_n_s16(__a1_527, 0));
+  __ret_527 = __builtin_shufflevector(__ret_527, __ret_527, 3, 2, 1, 0);
+  return __ret_527;
 }
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_137) {
-  int32x4_t __ret_137;
-  int16x4_t __a1_137 = __noswap_vget_high_s16(__p0_137);
-  __ret_137 = (int32x4_t)(__noswap_vshll_n_s16(__a1_137, 0));
-  return __ret_137;
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_528) {
+  int32x4_t __ret_528;
+  int16x4_t __a1_528 = __noswap_vget_high_s16(__p0_528);
+  __ret_528 = (int32x4_t)(__noswap_vshll_n_s16(__a1_528, 0));
+  return __ret_528;
 }
 #endif
 
@@ -50168,29 +53968,29 @@
   __ret = __p0 * __p1;
   return __ret;
 }
-#define vmuld_lane_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  float64_t __s0_138 = __p0_138; \
-  float64x1_t __s1_138 = __p1_138; \
-  float64_t __ret_138; \
-  __ret_138 = __s0_138 * vget_lane_f64(__s1_138, __p2_138); \
-  __ret_138; \
+#define vmuld_lane_f64(__p0_529, __p1_529, __p2_529) __extension__ ({ \
+  float64_t __s0_529 = __p0_529; \
+  float64x1_t __s1_529 = __p1_529; \
+  float64_t __ret_529; \
+  __ret_529 = __s0_529 * vget_lane_f64(__s1_529, __p2_529); \
+  __ret_529; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  float32_t __s0_139 = __p0_139; \
-  float32x2_t __s1_139 = __p1_139; \
-  float32_t __ret_139; \
-  __ret_139 = __s0_139 * vget_lane_f32(__s1_139, __p2_139); \
-  __ret_139; \
+#define vmuls_lane_f32(__p0_530, __p1_530, __p2_530) __extension__ ({ \
+  float32_t __s0_530 = __p0_530; \
+  float32x2_t __s1_530 = __p1_530; \
+  float32_t __ret_530; \
+  __ret_530 = __s0_530 * vget_lane_f32(__s1_530, __p2_530); \
+  __ret_530; \
 })
 #else
-#define vmuls_lane_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  float32_t __s0_140 = __p0_140; \
-  float32x2_t __s1_140 = __p1_140; \
-  float32x2_t __rev1_140;  __rev1_140 = __builtin_shufflevector(__s1_140, __s1_140, 1, 0); \
-  float32_t __ret_140; \
-  __ret_140 = __s0_140 * __noswap_vget_lane_f32(__rev1_140, __p2_140); \
-  __ret_140; \
+#define vmuls_lane_f32(__p0_531, __p1_531, __p2_531) __extension__ ({ \
+  float32_t __s0_531 = __p0_531; \
+  float32x2_t __s1_531 = __p1_531; \
+  float32x2_t __rev1_531;  __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, 1, 0); \
+  float32_t __ret_531; \
+  __ret_531 = __s0_531 * __noswap_vget_lane_f32(__rev1_531, __p2_531); \
+  __ret_531; \
 })
 #endif
 
@@ -50202,60 +54002,60 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_f64(__p0_532, __p1_532, __p2_532) __extension__ ({ \
+  float64x2_t __s0_532 = __p0_532; \
+  float64x1_t __s1_532 = __p1_532; \
+  float64x2_t __ret_532; \
+  __ret_532 = __s0_532 * splatq_lane_f64(__s1_532, __p2_532); \
+  __ret_532; \
 })
 #else
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulq_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \
+  float64x2_t __s0_533 = __p0_533; \
+  float64x1_t __s1_533 = __p1_533; \
+  float64x2_t __rev0_533;  __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, 1, 0); \
+  float64x2_t __ret_533; \
+  __ret_533 = __rev0_533 * __noswap_splatq_lane_f64(__s1_533, __p2_533); \
+  __ret_533 = __builtin_shufflevector(__ret_533, __ret_533, 1, 0); \
+  __ret_533; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  float64_t __s0_141 = __p0_141; \
-  float64x2_t __s1_141 = __p1_141; \
-  float64_t __ret_141; \
-  __ret_141 = __s0_141 * vgetq_lane_f64(__s1_141, __p2_141); \
-  __ret_141; \
+#define vmuld_laneq_f64(__p0_534, __p1_534, __p2_534) __extension__ ({ \
+  float64_t __s0_534 = __p0_534; \
+  float64x2_t __s1_534 = __p1_534; \
+  float64_t __ret_534; \
+  __ret_534 = __s0_534 * vgetq_lane_f64(__s1_534, __p2_534); \
+  __ret_534; \
 })
 #else
-#define vmuld_laneq_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
-  float64_t __s0_142 = __p0_142; \
-  float64x2_t __s1_142 = __p1_142; \
-  float64x2_t __rev1_142;  __rev1_142 = __builtin_shufflevector(__s1_142, __s1_142, 1, 0); \
-  float64_t __ret_142; \
-  __ret_142 = __s0_142 * __noswap_vgetq_lane_f64(__rev1_142, __p2_142); \
-  __ret_142; \
+#define vmuld_laneq_f64(__p0_535, __p1_535, __p2_535) __extension__ ({ \
+  float64_t __s0_535 = __p0_535; \
+  float64x2_t __s1_535 = __p1_535; \
+  float64x2_t __rev1_535;  __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \
+  float64_t __ret_535; \
+  __ret_535 = __s0_535 * __noswap_vgetq_lane_f64(__rev1_535, __p2_535); \
+  __ret_535; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_143, __p1_143, __p2_143) __extension__ ({ \
-  float32_t __s0_143 = __p0_143; \
-  float32x4_t __s1_143 = __p1_143; \
-  float32_t __ret_143; \
-  __ret_143 = __s0_143 * vgetq_lane_f32(__s1_143, __p2_143); \
-  __ret_143; \
+#define vmuls_laneq_f32(__p0_536, __p1_536, __p2_536) __extension__ ({ \
+  float32_t __s0_536 = __p0_536; \
+  float32x4_t __s1_536 = __p1_536; \
+  float32_t __ret_536; \
+  __ret_536 = __s0_536 * vgetq_lane_f32(__s1_536, __p2_536); \
+  __ret_536; \
 })
 #else
-#define vmuls_laneq_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
-  float32_t __s0_144 = __p0_144; \
-  float32x4_t __s1_144 = __p1_144; \
-  float32x4_t __rev1_144;  __rev1_144 = __builtin_shufflevector(__s1_144, __s1_144, 3, 2, 1, 0); \
-  float32_t __ret_144; \
-  __ret_144 = __s0_144 * __noswap_vgetq_lane_f32(__rev1_144, __p2_144); \
-  __ret_144; \
+#define vmuls_laneq_f32(__p0_537, __p1_537, __p2_537) __extension__ ({ \
+  float32_t __s0_537 = __p0_537; \
+  float32x4_t __s1_537 = __p1_537; \
+  float32x4_t __rev1_537;  __rev1_537 = __builtin_shufflevector(__s1_537, __s1_537, 3, 2, 1, 0); \
+  float32_t __ret_537; \
+  __ret_537 = __s0_537 * __noswap_vgetq_lane_f32(__rev1_537, __p2_537); \
+  __ret_537; \
 })
 #endif
 
@@ -50279,233 +54079,233 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_u32(__p0_538, __p1_538, __p2_538) __extension__ ({ \
+  uint32x4_t __s0_538 = __p0_538; \
+  uint32x4_t __s1_538 = __p1_538; \
+  uint32x4_t __ret_538; \
+  __ret_538 = __s0_538 * splatq_laneq_u32(__s1_538, __p2_538); \
+  __ret_538; \
 })
 #else
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_u32(__p0_539, __p1_539, __p2_539) __extension__ ({ \
+  uint32x4_t __s0_539 = __p0_539; \
+  uint32x4_t __s1_539 = __p1_539; \
+  uint32x4_t __rev0_539;  __rev0_539 = __builtin_shufflevector(__s0_539, __s0_539, 3, 2, 1, 0); \
+  uint32x4_t __rev1_539;  __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 3, 2, 1, 0); \
+  uint32x4_t __ret_539; \
+  __ret_539 = __rev0_539 * __noswap_splatq_laneq_u32(__rev1_539, __p2_539); \
+  __ret_539 = __builtin_shufflevector(__ret_539, __ret_539, 3, 2, 1, 0); \
+  __ret_539; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_u16(__p0_540, __p1_540, __p2_540) __extension__ ({ \
+  uint16x8_t __s0_540 = __p0_540; \
+  uint16x8_t __s1_540 = __p1_540; \
+  uint16x8_t __ret_540; \
+  __ret_540 = __s0_540 * splatq_laneq_u16(__s1_540, __p2_540); \
+  __ret_540; \
 })
 #else
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_u16(__p0_541, __p1_541, __p2_541) __extension__ ({ \
+  uint16x8_t __s0_541 = __p0_541; \
+  uint16x8_t __s1_541 = __p1_541; \
+  uint16x8_t __rev0_541;  __rev0_541 = __builtin_shufflevector(__s0_541, __s0_541, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_541;  __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_541; \
+  __ret_541 = __rev0_541 * __noswap_splatq_laneq_u16(__rev1_541, __p2_541); \
+  __ret_541 = __builtin_shufflevector(__ret_541, __ret_541, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_541; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_f64(__p0_542, __p1_542, __p2_542) __extension__ ({ \
+  float64x2_t __s0_542 = __p0_542; \
+  float64x2_t __s1_542 = __p1_542; \
+  float64x2_t __ret_542; \
+  __ret_542 = __s0_542 * splatq_laneq_f64(__s1_542, __p2_542); \
+  __ret_542; \
 })
 #else
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulq_laneq_f64(__p0_543, __p1_543, __p2_543) __extension__ ({ \
+  float64x2_t __s0_543 = __p0_543; \
+  float64x2_t __s1_543 = __p1_543; \
+  float64x2_t __rev0_543;  __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 1, 0); \
+  float64x2_t __rev1_543;  __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 1, 0); \
+  float64x2_t __ret_543; \
+  __ret_543 = __rev0_543 * __noswap_splatq_laneq_f64(__rev1_543, __p2_543); \
+  __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 1, 0); \
+  __ret_543; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_f32(__p0_544, __p1_544, __p2_544) __extension__ ({ \
+  float32x4_t __s0_544 = __p0_544; \
+  float32x4_t __s1_544 = __p1_544; \
+  float32x4_t __ret_544; \
+  __ret_544 = __s0_544 * splatq_laneq_f32(__s1_544, __p2_544); \
+  __ret_544; \
 })
 #else
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_f32(__p0_545, __p1_545, __p2_545) __extension__ ({ \
+  float32x4_t __s0_545 = __p0_545; \
+  float32x4_t __s1_545 = __p1_545; \
+  float32x4_t __rev0_545;  __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 3, 2, 1, 0); \
+  float32x4_t __rev1_545;  __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 3, 2, 1, 0); \
+  float32x4_t __ret_545; \
+  __ret_545 = __rev0_545 * __noswap_splatq_laneq_f32(__rev1_545, __p2_545); \
+  __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 3, 2, 1, 0); \
+  __ret_545; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_s32(__p0_546, __p1_546, __p2_546) __extension__ ({ \
+  int32x4_t __s0_546 = __p0_546; \
+  int32x4_t __s1_546 = __p1_546; \
+  int32x4_t __ret_546; \
+  __ret_546 = __s0_546 * splatq_laneq_s32(__s1_546, __p2_546); \
+  __ret_546; \
 })
 #else
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_s32(__p0_547, __p1_547, __p2_547) __extension__ ({ \
+  int32x4_t __s0_547 = __p0_547; \
+  int32x4_t __s1_547 = __p1_547; \
+  int32x4_t __rev0_547;  __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 3, 2, 1, 0); \
+  int32x4_t __rev1_547;  __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 3, 2, 1, 0); \
+  int32x4_t __ret_547; \
+  __ret_547 = __rev0_547 * __noswap_splatq_laneq_s32(__rev1_547, __p2_547); \
+  __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 3, 2, 1, 0); \
+  __ret_547; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_s16(__p0_548, __p1_548, __p2_548) __extension__ ({ \
+  int16x8_t __s0_548 = __p0_548; \
+  int16x8_t __s1_548 = __p1_548; \
+  int16x8_t __ret_548; \
+  __ret_548 = __s0_548 * splatq_laneq_s16(__s1_548, __p2_548); \
+  __ret_548; \
 })
 #else
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_s16(__p0_549, __p1_549, __p2_549) __extension__ ({ \
+  int16x8_t __s0_549 = __p0_549; \
+  int16x8_t __s1_549 = __p1_549; \
+  int16x8_t __rev0_549;  __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_549;  __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_549; \
+  __ret_549 = __rev0_549 * __noswap_splatq_laneq_s16(__rev1_549, __p2_549); \
+  __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_549; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_u32(__p0_550, __p1_550, __p2_550) __extension__ ({ \
+  uint32x2_t __s0_550 = __p0_550; \
+  uint32x4_t __s1_550 = __p1_550; \
+  uint32x2_t __ret_550; \
+  __ret_550 = __s0_550 * splat_laneq_u32(__s1_550, __p2_550); \
+  __ret_550; \
 })
 #else
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_laneq_u32(__p0_551, __p1_551, __p2_551) __extension__ ({ \
+  uint32x2_t __s0_551 = __p0_551; \
+  uint32x4_t __s1_551 = __p1_551; \
+  uint32x2_t __rev0_551;  __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 1, 0); \
+  uint32x4_t __rev1_551;  __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \
+  uint32x2_t __ret_551; \
+  __ret_551 = __rev0_551 * __noswap_splat_laneq_u32(__rev1_551, __p2_551); \
+  __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 1, 0); \
+  __ret_551; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_u16(__p0_552, __p1_552, __p2_552) __extension__ ({ \
+  uint16x4_t __s0_552 = __p0_552; \
+  uint16x8_t __s1_552 = __p1_552; \
+  uint16x4_t __ret_552; \
+  __ret_552 = __s0_552 * splat_laneq_u16(__s1_552, __p2_552); \
+  __ret_552; \
 })
 #else
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_laneq_u16(__p0_553, __p1_553, __p2_553) __extension__ ({ \
+  uint16x4_t __s0_553 = __p0_553; \
+  uint16x8_t __s1_553 = __p1_553; \
+  uint16x4_t __rev0_553;  __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 3, 2, 1, 0); \
+  uint16x8_t __rev1_553;  __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_553; \
+  __ret_553 = __rev0_553 * __noswap_splat_laneq_u16(__rev1_553, __p2_553); \
+  __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 3, 2, 1, 0); \
+  __ret_553; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_f32(__p0_554, __p1_554, __p2_554) __extension__ ({ \
+  float32x2_t __s0_554 = __p0_554; \
+  float32x4_t __s1_554 = __p1_554; \
+  float32x2_t __ret_554; \
+  __ret_554 = __s0_554 * splat_laneq_f32(__s1_554, __p2_554); \
+  __ret_554; \
 })
 #else
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_laneq_f32(__p0_555, __p1_555, __p2_555) __extension__ ({ \
+  float32x2_t __s0_555 = __p0_555; \
+  float32x4_t __s1_555 = __p1_555; \
+  float32x2_t __rev0_555;  __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \
+  float32x4_t __rev1_555;  __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \
+  float32x2_t __ret_555; \
+  __ret_555 = __rev0_555 * __noswap_splat_laneq_f32(__rev1_555, __p2_555); \
+  __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \
+  __ret_555; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_s32(__p0_556, __p1_556, __p2_556) __extension__ ({ \
+  int32x2_t __s0_556 = __p0_556; \
+  int32x4_t __s1_556 = __p1_556; \
+  int32x2_t __ret_556; \
+  __ret_556 = __s0_556 * splat_laneq_s32(__s1_556, __p2_556); \
+  __ret_556; \
 })
 #else
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_laneq_s32(__p0_557, __p1_557, __p2_557) __extension__ ({ \
+  int32x2_t __s0_557 = __p0_557; \
+  int32x4_t __s1_557 = __p1_557; \
+  int32x2_t __rev0_557;  __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 1, 0); \
+  int32x4_t __rev1_557;  __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 3, 2, 1, 0); \
+  int32x2_t __ret_557; \
+  __ret_557 = __rev0_557 * __noswap_splat_laneq_s32(__rev1_557, __p2_557); \
+  __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 1, 0); \
+  __ret_557; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_s16(__p0_558, __p1_558, __p2_558) __extension__ ({ \
+  int16x4_t __s0_558 = __p0_558; \
+  int16x8_t __s1_558 = __p1_558; \
+  int16x4_t __ret_558; \
+  __ret_558 = __s0_558 * splat_laneq_s16(__s1_558, __p2_558); \
+  __ret_558; \
 })
 #else
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_laneq_s16(__p0_559, __p1_559, __p2_559) __extension__ ({ \
+  int16x4_t __s0_559 = __p0_559; \
+  int16x8_t __s1_559 = __p1_559; \
+  int16x4_t __rev0_559;  __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 3, 2, 1, 0); \
+  int16x8_t __rev1_559;  __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_559; \
+  __ret_559 = __rev0_559 * __noswap_splat_laneq_s16(__rev1_559, __p2_559); \
+  __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 3, 2, 1, 0); \
+  __ret_559; \
 })
 #endif
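
In use, the `vmul_laneq_*` family multiplies every element of a 64-bit vector by a single lane of a 128-bit vector. A short usage sketch, assuming a little-endian AArch64 target (the function name is illustrative):

    #include <arm_neon.h>

    /* out[i] = a[i] * b[5]; the lane index must be a compile-time
     * constant in the range 0..7 for int16x8_t. */
    int16x4_t scale_by_lane5(int16x4_t a, int16x8_t b) {
      return vmul_laneq_s16(a, b, 5);
    }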
 
@@ -50671,170 +54471,170 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_u32(__p0_560, __p1_560, __p2_560) __extension__ ({ \
+  uint32x4_t __s0_560 = __p0_560; \
+  uint32x2_t __s1_560 = __p1_560; \
+  uint64x2_t __ret_560; \
+  __ret_560 = vmull_u32(vget_high_u32(__s0_560), splat_lane_u32(__s1_560, __p2_560)); \
+  __ret_560; \
 })
 #else
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_lane_u32(__p0_561, __p1_561, __p2_561) __extension__ ({ \
+  uint32x4_t __s0_561 = __p0_561; \
+  uint32x2_t __s1_561 = __p1_561; \
+  uint32x4_t __rev0_561;  __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 3, 2, 1, 0); \
+  uint32x2_t __rev1_561;  __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 1, 0); \
+  uint64x2_t __ret_561; \
+  __ret_561 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_561), __noswap_splat_lane_u32(__rev1_561, __p2_561)); \
+  __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \
+  __ret_561; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_u16(__p0_562, __p1_562, __p2_562) __extension__ ({ \
+  uint16x8_t __s0_562 = __p0_562; \
+  uint16x4_t __s1_562 = __p1_562; \
+  uint32x4_t __ret_562; \
+  __ret_562 = vmull_u16(vget_high_u16(__s0_562), splat_lane_u16(__s1_562, __p2_562)); \
+  __ret_562; \
 })
 #else
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_lane_u16(__p0_563, __p1_563, __p2_563) __extension__ ({ \
+  uint16x8_t __s0_563 = __p0_563; \
+  uint16x4_t __s1_563 = __p1_563; \
+  uint16x8_t __rev0_563;  __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_563;  __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 3, 2, 1, 0); \
+  uint32x4_t __ret_563; \
+  __ret_563 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_563), __noswap_splat_lane_u16(__rev1_563, __p2_563)); \
+  __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \
+  __ret_563; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_s32(__p0_564, __p1_564, __p2_564) __extension__ ({ \
+  int32x4_t __s0_564 = __p0_564; \
+  int32x2_t __s1_564 = __p1_564; \
+  int64x2_t __ret_564; \
+  __ret_564 = vmull_s32(vget_high_s32(__s0_564), splat_lane_s32(__s1_564, __p2_564)); \
+  __ret_564; \
 })
 #else
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_lane_s32(__p0_565, __p1_565, __p2_565) __extension__ ({ \
+  int32x4_t __s0_565 = __p0_565; \
+  int32x2_t __s1_565 = __p1_565; \
+  int32x4_t __rev0_565;  __rev0_565 = __builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \
+  int32x2_t __rev1_565;  __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 0); \
+  int64x2_t __ret_565; \
+  __ret_565 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_565), __noswap_splat_lane_s32(__rev1_565, __p2_565)); \
+  __ret_565 = __builtin_shufflevector(__ret_565, __ret_565, 1, 0); \
+  __ret_565; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_s16(__p0_566, __p1_566, __p2_566) __extension__ ({ \
+  int16x8_t __s0_566 = __p0_566; \
+  int16x4_t __s1_566 = __p1_566; \
+  int32x4_t __ret_566; \
+  __ret_566 = vmull_s16(vget_high_s16(__s0_566), splat_lane_s16(__s1_566, __p2_566)); \
+  __ret_566; \
 })
 #else
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_lane_s16(__p0_567, __p1_567, __p2_567) __extension__ ({ \
+  int16x8_t __s0_567 = __p0_567; \
+  int16x4_t __s1_567 = __p1_567; \
+  int16x8_t __rev0_567;  __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_567;  __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \
+  int32x4_t __ret_567; \
+  __ret_567 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_567), __noswap_splat_lane_s16(__rev1_567, __p2_567)); \
+  __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \
+  __ret_567; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_u32(__p0_568, __p1_568, __p2_568) __extension__ ({ \
+  uint32x4_t __s0_568 = __p0_568; \
+  uint32x4_t __s1_568 = __p1_568; \
+  uint64x2_t __ret_568; \
+  __ret_568 = vmull_u32(vget_high_u32(__s0_568), splat_laneq_u32(__s1_568, __p2_568)); \
+  __ret_568; \
 })
 #else
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_u32(__p0_569, __p1_569, __p2_569) __extension__ ({ \
+  uint32x4_t __s0_569 = __p0_569; \
+  uint32x4_t __s1_569 = __p1_569; \
+  uint32x4_t __rev0_569;  __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \
+  uint32x4_t __rev1_569;  __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 3, 2, 1, 0); \
+  uint64x2_t __ret_569; \
+  __ret_569 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_569), __noswap_splat_laneq_u32(__rev1_569, __p2_569)); \
+  __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \
+  __ret_569; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_u16(__p0_570, __p1_570, __p2_570) __extension__ ({ \
+  uint16x8_t __s0_570 = __p0_570; \
+  uint16x8_t __s1_570 = __p1_570; \
+  uint32x4_t __ret_570; \
+  __ret_570 = vmull_u16(vget_high_u16(__s0_570), splat_laneq_u16(__s1_570, __p2_570)); \
+  __ret_570; \
 })
 #else
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_u16(__p0_571, __p1_571, __p2_571) __extension__ ({ \
+  uint16x8_t __s0_571 = __p0_571; \
+  uint16x8_t __s1_571 = __p1_571; \
+  uint16x8_t __rev0_571;  __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_571;  __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_571; \
+  __ret_571 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_571), __noswap_splat_laneq_u16(__rev1_571, __p2_571)); \
+  __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \
+  __ret_571; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_s32(__p0_572, __p1_572, __p2_572) __extension__ ({ \
+  int32x4_t __s0_572 = __p0_572; \
+  int32x4_t __s1_572 = __p1_572; \
+  int64x2_t __ret_572; \
+  __ret_572 = vmull_s32(vget_high_s32(__s0_572), splat_laneq_s32(__s1_572, __p2_572)); \
+  __ret_572; \
 })
 #else
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_s32(__p0_573, __p1_573, __p2_573) __extension__ ({ \
+  int32x4_t __s0_573 = __p0_573; \
+  int32x4_t __s1_573 = __p1_573; \
+  int32x4_t __rev0_573;  __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \
+  int32x4_t __rev1_573;  __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \
+  int64x2_t __ret_573; \
+  __ret_573 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_573), __noswap_splat_laneq_s32(__rev1_573, __p2_573)); \
+  __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \
+  __ret_573; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_s16(__p0_574, __p1_574, __p2_574) __extension__ ({ \
+  int16x8_t __s0_574 = __p0_574; \
+  int16x8_t __s1_574 = __p1_574; \
+  int32x4_t __ret_574; \
+  __ret_574 = vmull_s16(vget_high_s16(__s0_574), splat_laneq_s16(__s1_574, __p2_574)); \
+  __ret_574; \
 })
 #else
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_s16(__p0_575, __p1_575, __p2_575) __extension__ ({ \
+  int16x8_t __s0_575 = __p0_575; \
+  int16x8_t __s1_575 = __p1_575; \
+  int16x8_t __rev0_575;  __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_575;  __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_575; \
+  __ret_575 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_575), __noswap_splat_laneq_s16(__rev1_575, __p2_575)); \
+  __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \
+  __ret_575; \
 })
 #endif
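
The `vmull_high_lane*` forms are widening multiplies: the high half of a 128-bit vector is multiplied by a broadcast lane, producing double-width elements. A sketch, assuming AArch64; the equivalence in the comment follows from the macro body above:

    #include <arm_neon.h>

    /* r[i] = (uint64_t)a[2+i] * (uint64_t)b[1] for i = 0,1 -- the same as
     * vmull_u32(vget_high_u32(a), vdup_lane_u32(b, 1)). */
    uint64x2_t mull_high_by_lane1(uint32x4_t a, uint32x2_t b) {
      return vmull_high_lane_u32(a, b, 1);
    }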
 
@@ -50903,86 +54703,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_u32(__p0_576, __p1_576, __p2_576) __extension__ ({ \
+  uint32x2_t __s0_576 = __p0_576; \
+  uint32x4_t __s1_576 = __p1_576; \
+  uint64x2_t __ret_576; \
+  __ret_576 = vmull_u32(__s0_576, splat_laneq_u32(__s1_576, __p2_576)); \
+  __ret_576; \
 })
 #else
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_laneq_u32(__p0_577, __p1_577, __p2_577) __extension__ ({ \
+  uint32x2_t __s0_577 = __p0_577; \
+  uint32x4_t __s1_577 = __p1_577; \
+  uint32x2_t __rev0_577;  __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 1, 0); \
+  uint32x4_t __rev1_577;  __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \
+  uint64x2_t __ret_577; \
+  __ret_577 = __noswap_vmull_u32(__rev0_577, __noswap_splat_laneq_u32(__rev1_577, __p2_577)); \
+  __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \
+  __ret_577; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_u16(__p0_578, __p1_578, __p2_578) __extension__ ({ \
+  uint16x4_t __s0_578 = __p0_578; \
+  uint16x8_t __s1_578 = __p1_578; \
+  uint32x4_t __ret_578; \
+  __ret_578 = vmull_u16(__s0_578, splat_laneq_u16(__s1_578, __p2_578)); \
+  __ret_578; \
 })
 #else
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_laneq_u16(__p0_579, __p1_579, __p2_579) __extension__ ({ \
+  uint16x4_t __s0_579 = __p0_579; \
+  uint16x8_t __s1_579 = __p1_579; \
+  uint16x4_t __rev0_579;  __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 3, 2, 1, 0); \
+  uint16x8_t __rev1_579;  __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_579; \
+  __ret_579 = __noswap_vmull_u16(__rev0_579, __noswap_splat_laneq_u16(__rev1_579, __p2_579)); \
+  __ret_579 = __builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \
+  __ret_579; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_s32(__p0_580, __p1_580, __p2_580) __extension__ ({ \
+  int32x2_t __s0_580 = __p0_580; \
+  int32x4_t __s1_580 = __p1_580; \
+  int64x2_t __ret_580; \
+  __ret_580 = vmull_s32(__s0_580, splat_laneq_s32(__s1_580, __p2_580)); \
+  __ret_580; \
 })
 #else
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_laneq_s32(__p0_581, __p1_581, __p2_581) __extension__ ({ \
+  int32x2_t __s0_581 = __p0_581; \
+  int32x4_t __s1_581 = __p1_581; \
+  int32x2_t __rev0_581;  __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \
+  int32x4_t __rev1_581;  __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \
+  int64x2_t __ret_581; \
+  __ret_581 = __noswap_vmull_s32(__rev0_581, __noswap_splat_laneq_s32(__rev1_581, __p2_581)); \
+  __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \
+  __ret_581; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_s16(__p0_582, __p1_582, __p2_582) __extension__ ({ \
+  int16x4_t __s0_582 = __p0_582; \
+  int16x8_t __s1_582 = __p1_582; \
+  int32x4_t __ret_582; \
+  __ret_582 = vmull_s16(__s0_582, splat_laneq_s16(__s1_582, __p2_582)); \
+  __ret_582; \
 })
 #else
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_laneq_s16(__p0_583, __p1_583, __p2_583) __extension__ ({ \
+  int16x4_t __s0_583 = __p0_583; \
+  int16x8_t __s1_583 = __p1_583; \
+  int16x4_t __rev0_583;  __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \
+  int16x8_t __rev1_583;  __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_583; \
+  __ret_583 = __noswap_vmull_s16(__rev0_583, __noswap_splat_laneq_s16(__rev1_583, __p2_583)); \
+  __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \
+  __ret_583; \
 })
 #endif
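
Every big-endian branch in these hunks has the same shape: reverse each input into little-endian lane order, call a `__noswap_` variant that assumes its operands are already reversed, then reverse the result back. The identity being relied on is rev(f(rev(a), rev(b))) == f(a, b) for any lane-wise f. A standalone sketch of that identity using clang's `__builtin_shufflevector` (the function is illustrative, not part of the header):

    #include <arm_neon.h>

    /* Lane-wise multiply expressed the way the big-endian macros do it:
     * swap 2-lane vectors with indices 1,0, operate, swap back. */
    static inline uint32x2_t mul_via_reversal(uint32x2_t a, uint32x2_t b) {
      uint32x2_t ra = __builtin_shufflevector(a, a, 1, 0);
      uint32x2_t rb = __builtin_shufflevector(b, b, 1, 0);
      uint32x2_t r = ra * rb;                      /* lane-wise product */
      return __builtin_shufflevector(r, r, 1, 0);  /* undo the swap */
    }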
 
@@ -51067,192 +54867,192 @@
   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
   return __ret;
 }
-#define vmulxd_lane_f64(__p0_145, __p1_145, __p2_145) __extension__ ({ \
-  float64_t __s0_145 = __p0_145; \
-  float64x1_t __s1_145 = __p1_145; \
-  float64_t __ret_145; \
-  __ret_145 = vmulxd_f64(__s0_145, vget_lane_f64(__s1_145, __p2_145)); \
-  __ret_145; \
+#define vmulxd_lane_f64(__p0_584, __p1_584, __p2_584) __extension__ ({ \
+  float64_t __s0_584 = __p0_584; \
+  float64x1_t __s1_584 = __p1_584; \
+  float64_t __ret_584; \
+  __ret_584 = vmulxd_f64(__s0_584, vget_lane_f64(__s1_584, __p2_584)); \
+  __ret_584; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_146, __p1_146, __p2_146) __extension__ ({ \
-  float32_t __s0_146 = __p0_146; \
-  float32x2_t __s1_146 = __p1_146; \
-  float32_t __ret_146; \
-  __ret_146 = vmulxs_f32(__s0_146, vget_lane_f32(__s1_146, __p2_146)); \
-  __ret_146; \
+#define vmulxs_lane_f32(__p0_585, __p1_585, __p2_585) __extension__ ({ \
+  float32_t __s0_585 = __p0_585; \
+  float32x2_t __s1_585 = __p1_585; \
+  float32_t __ret_585; \
+  __ret_585 = vmulxs_f32(__s0_585, vget_lane_f32(__s1_585, __p2_585)); \
+  __ret_585; \
 })
 #else
-#define vmulxs_lane_f32(__p0_147, __p1_147, __p2_147) __extension__ ({ \
-  float32_t __s0_147 = __p0_147; \
-  float32x2_t __s1_147 = __p1_147; \
-  float32x2_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
-  float32_t __ret_147; \
-  __ret_147 = vmulxs_f32(__s0_147, __noswap_vget_lane_f32(__rev1_147, __p2_147)); \
-  __ret_147; \
+#define vmulxs_lane_f32(__p0_586, __p1_586, __p2_586) __extension__ ({ \
+  float32_t __s0_586 = __p0_586; \
+  float32x2_t __s1_586 = __p1_586; \
+  float32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
+  float32_t __ret_586; \
+  __ret_586 = vmulxs_f32(__s0_586, __noswap_vget_lane_f32(__rev1_586, __p2_586)); \
+  __ret_586; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulxq_lane_f64(__p0_587, __p1_587, __p2_587) __extension__ ({ \
+  float64x2_t __s0_587 = __p0_587; \
+  float64x1_t __s1_587 = __p1_587; \
+  float64x2_t __ret_587; \
+  __ret_587 = vmulxq_f64(__s0_587, splatq_lane_f64(__s1_587, __p2_587)); \
+  __ret_587; \
 })
 #else
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulxq_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \
+  float64x2_t __s0_588 = __p0_588; \
+  float64x1_t __s1_588 = __p1_588; \
+  float64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
+  float64x2_t __ret_588; \
+  __ret_588 = __noswap_vmulxq_f64(__rev0_588, __noswap_splatq_lane_f64(__s1_588, __p2_588)); \
+  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
+  __ret_588; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \
+  float32x4_t __s0_589 = __p0_589; \
+  float32x2_t __s1_589 = __p1_589; \
+  float32x4_t __ret_589; \
+  __ret_589 = vmulxq_f32(__s0_589, splatq_lane_f32(__s1_589, __p2_589)); \
+  __ret_589; \
 })
 #else
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \
+  float32x4_t __s0_590 = __p0_590; \
+  float32x2_t __s1_590 = __p1_590; \
+  float32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
+  float32x2_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \
+  float32x4_t __ret_590; \
+  __ret_590 = __noswap_vmulxq_f32(__rev0_590, __noswap_splatq_lane_f32(__rev1_590, __p2_590)); \
+  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
+  __ret_590; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulx_lane_f32(__p0_591, __p1_591, __p2_591) __extension__ ({ \
+  float32x2_t __s0_591 = __p0_591; \
+  float32x2_t __s1_591 = __p1_591; \
+  float32x2_t __ret_591; \
+  __ret_591 = vmulx_f32(__s0_591, splat_lane_f32(__s1_591, __p2_591)); \
+  __ret_591; \
 })
 #else
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulx_lane_f32(__p0_592, __p1_592, __p2_592) __extension__ ({ \
+  float32x2_t __s0_592 = __p0_592; \
+  float32x2_t __s1_592 = __p1_592; \
+  float32x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
+  float32x2_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \
+  float32x2_t __ret_592; \
+  __ret_592 = __noswap_vmulx_f32(__rev0_592, __noswap_splat_lane_f32(__rev1_592, __p2_592)); \
+  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
+  __ret_592; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_148, __p1_148, __p2_148) __extension__ ({ \
-  float64_t __s0_148 = __p0_148; \
-  float64x2_t __s1_148 = __p1_148; \
-  float64_t __ret_148; \
-  __ret_148 = vmulxd_f64(__s0_148, vgetq_lane_f64(__s1_148, __p2_148)); \
-  __ret_148; \
+#define vmulxd_laneq_f64(__p0_593, __p1_593, __p2_593) __extension__ ({ \
+  float64_t __s0_593 = __p0_593; \
+  float64x2_t __s1_593 = __p1_593; \
+  float64_t __ret_593; \
+  __ret_593 = vmulxd_f64(__s0_593, vgetq_lane_f64(__s1_593, __p2_593)); \
+  __ret_593; \
 })
 #else
-#define vmulxd_laneq_f64(__p0_149, __p1_149, __p2_149) __extension__ ({ \
-  float64_t __s0_149 = __p0_149; \
-  float64x2_t __s1_149 = __p1_149; \
-  float64x2_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 1, 0); \
-  float64_t __ret_149; \
-  __ret_149 = vmulxd_f64(__s0_149, __noswap_vgetq_lane_f64(__rev1_149, __p2_149)); \
-  __ret_149; \
+#define vmulxd_laneq_f64(__p0_594, __p1_594, __p2_594) __extension__ ({ \
+  float64_t __s0_594 = __p0_594; \
+  float64x2_t __s1_594 = __p1_594; \
+  float64x2_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \
+  float64_t __ret_594; \
+  __ret_594 = vmulxd_f64(__s0_594, __noswap_vgetq_lane_f64(__rev1_594, __p2_594)); \
+  __ret_594; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_150, __p1_150, __p2_150) __extension__ ({ \
-  float32_t __s0_150 = __p0_150; \
-  float32x4_t __s1_150 = __p1_150; \
-  float32_t __ret_150; \
-  __ret_150 = vmulxs_f32(__s0_150, vgetq_lane_f32(__s1_150, __p2_150)); \
-  __ret_150; \
+#define vmulxs_laneq_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \
+  float32_t __s0_595 = __p0_595; \
+  float32x4_t __s1_595 = __p1_595; \
+  float32_t __ret_595; \
+  __ret_595 = vmulxs_f32(__s0_595, vgetq_lane_f32(__s1_595, __p2_595)); \
+  __ret_595; \
 })
 #else
-#define vmulxs_laneq_f32(__p0_151, __p1_151, __p2_151) __extension__ ({ \
-  float32_t __s0_151 = __p0_151; \
-  float32x4_t __s1_151 = __p1_151; \
-  float32x4_t __rev1_151;  __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \
-  float32_t __ret_151; \
-  __ret_151 = vmulxs_f32(__s0_151, __noswap_vgetq_lane_f32(__rev1_151, __p2_151)); \
-  __ret_151; \
+#define vmulxs_laneq_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \
+  float32_t __s0_596 = __p0_596; \
+  float32x4_t __s1_596 = __p1_596; \
+  float32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
+  float32_t __ret_596; \
+  __ret_596 = vmulxs_f32(__s0_596, __noswap_vgetq_lane_f32(__rev1_596, __p2_596)); \
+  __ret_596; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulxq_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \
+  float64x2_t __s0_597 = __p0_597; \
+  float64x2_t __s1_597 = __p1_597; \
+  float64x2_t __ret_597; \
+  __ret_597 = vmulxq_f64(__s0_597, splatq_laneq_f64(__s1_597, __p2_597)); \
+  __ret_597; \
 })
 #else
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulxq_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \
+  float64x2_t __s0_598 = __p0_598; \
+  float64x2_t __s1_598 = __p1_598; \
+  float64x2_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 1, 0); \
+  float64x2_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \
+  float64x2_t __ret_598; \
+  __ret_598 = __noswap_vmulxq_f64(__rev0_598, __noswap_splatq_laneq_f64(__rev1_598, __p2_598)); \
+  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 1, 0); \
+  __ret_598; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \
+  float32x4_t __s0_599 = __p0_599; \
+  float32x4_t __s1_599 = __p1_599; \
+  float32x4_t __ret_599; \
+  __ret_599 = vmulxq_f32(__s0_599, splatq_laneq_f32(__s1_599, __p2_599)); \
+  __ret_599; \
 })
 #else
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \
+  float32x4_t __s0_600 = __p0_600; \
+  float32x4_t __s1_600 = __p1_600; \
+  float32x4_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 3, 2, 1, 0); \
+  float32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
+  float32x4_t __ret_600; \
+  __ret_600 = __noswap_vmulxq_f32(__rev0_600, __noswap_splatq_laneq_f32(__rev1_600, __p2_600)); \
+  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 3, 2, 1, 0); \
+  __ret_600; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulx_laneq_f32(__p0_601, __p1_601, __p2_601) __extension__ ({ \
+  float32x2_t __s0_601 = __p0_601; \
+  float32x4_t __s1_601 = __p1_601; \
+  float32x2_t __ret_601; \
+  __ret_601 = vmulx_f32(__s0_601, splat_laneq_f32(__s1_601, __p2_601)); \
+  __ret_601; \
 })
 #else
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulx_laneq_f32(__p0_602, __p1_602, __p2_602) __extension__ ({ \
+  float32x2_t __s0_602 = __p0_602; \
+  float32x4_t __s1_602 = __p1_602; \
+  float32x2_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \
+  float32x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
+  float32x2_t __ret_602; \
+  __ret_602 = __noswap_vmulx_f32(__rev0_602, __noswap_splat_laneq_f32(__rev1_602, __p2_602)); \
+  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \
+  __ret_602; \
 })
 #endif
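
The `vmulx*` family wraps the AArch64 FMULX instruction, which behaves like an ordinary floating-point multiply except that (±0) × (±∞) returns ±2.0 instead of NaN. A lane-variant usage sketch, assuming AArch64 (the function name is illustrative):

    #include <arm_neon.h>

    /* r[i] = fmulx(a[i], b[3]) -- the same as
     * vmulxq_f32(a, vdupq_laneq_f32(b, 3)). */
    float32x4_t mulx_by_lane3(float32x4_t a, float32x4_t b) {
      return vmulxq_laneq_f32(a, b, 3);
    }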
 
@@ -52155,98 +55955,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_lane_s32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
+  int64x2_t __s0_603 = __p0_603; \
+  int32x4_t __s1_603 = __p1_603; \
+  int32x2_t __s2_603 = __p2_603; \
+  int64x2_t __ret_603; \
+  __ret_603 = vqdmlal_s32(__s0_603, vget_high_s32(__s1_603), splat_lane_s32(__s2_603, __p3_603)); \
+  __ret_603; \
 })
 #else
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_high_lane_s32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
+  int64x2_t __s0_604 = __p0_604; \
+  int32x4_t __s1_604 = __p1_604; \
+  int32x2_t __s2_604 = __p2_604; \
+  int64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
+  int32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
+  int32x2_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 1, 0); \
+  int64x2_t __ret_604; \
+  __ret_604 = __noswap_vqdmlal_s32(__rev0_604, __noswap_vget_high_s32(__rev1_604), __noswap_splat_lane_s32(__rev2_604, __p3_604)); \
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
+  __ret_604; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_lane_s16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
+  int32x4_t __s0_605 = __p0_605; \
+  int16x8_t __s1_605 = __p1_605; \
+  int16x4_t __s2_605 = __p2_605; \
+  int32x4_t __ret_605; \
+  __ret_605 = vqdmlal_s16(__s0_605, vget_high_s16(__s1_605), splat_lane_s16(__s2_605, __p3_605)); \
+  __ret_605; \
 })
 #else
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_high_lane_s16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
+  int32x4_t __s0_606 = __p0_606; \
+  int16x8_t __s1_606 = __p1_606; \
+  int16x4_t __s2_606 = __p2_606; \
+  int32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
+  int16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 3, 2, 1, 0); \
+  int32x4_t __ret_606; \
+  __ret_606 = __noswap_vqdmlal_s16(__rev0_606, __noswap_vget_high_s16(__rev1_606), __noswap_splat_lane_s16(__rev2_606, __p3_606)); \
+  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
+  __ret_606; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
+  int64x2_t __s0_607 = __p0_607; \
+  int32x4_t __s1_607 = __p1_607; \
+  int32x4_t __s2_607 = __p2_607; \
+  int64x2_t __ret_607; \
+  __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \
+  __ret_607; \
 })
 #else
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
+  int64x2_t __s0_608 = __p0_608; \
+  int32x4_t __s1_608 = __p1_608; \
+  int32x4_t __s2_608 = __p2_608; \
+  int64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
+  int32x4_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \
+  int32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
+  int64x2_t __ret_608; \
+  __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \
+  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
+  __ret_608; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
+  int32x4_t __s0_609 = __p0_609; \
+  int16x8_t __s1_609 = __p1_609; \
+  int16x8_t __s2_609 = __p2_609; \
+  int32x4_t __ret_609; \
+  __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \
+  __ret_609; \
 })
 #else
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
+  int32x4_t __s0_610 = __p0_610; \
+  int16x8_t __s1_610 = __p1_610; \
+  int16x8_t __s2_610 = __p2_610; \
+  int32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
+  int16x8_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_610; \
+  __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
+  __ret_610; \
 })
 #endif
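
`vqdmlal_high_lane*` is a saturating doubling multiply-accumulate long: the high half of the second operand is multiplied by a broadcast lane, doubled, widened, and added to the accumulator with saturation. A sketch, assuming AArch64:

    #include <arm_neon.h>

    /* acc[i] = sat64(acc[i] + sat64(2 * (int64_t)a[2+i] * (int64_t)b[0]))
     * for i = 0,1. */
    int64x2_t qdmlal_high_by_lane0(int64x2_t acc, int32x4_t a, int32x2_t b) {
      return vqdmlal_high_lane_s32(acc, a, b, 0);
    }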
 
@@ -52369,50 +56169,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
+  int64x2_t __s0_611 = __p0_611; \
+  int32x2_t __s1_611 = __p1_611; \
+  int32x4_t __s2_611 = __p2_611; \
+  int64x2_t __ret_611; \
+  __ret_611 = vqdmlal_s32(__s0_611, __s1_611, splat_laneq_s32(__s2_611, __p3_611)); \
+  __ret_611; \
 })
 #else
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
+  int64x2_t __s0_612 = __p0_612; \
+  int32x2_t __s1_612 = __p1_612; \
+  int32x4_t __s2_612 = __p2_612; \
+  int64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
+  int32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
+  int32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
+  int64x2_t __ret_612; \
+  __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __rev1_612, __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \
+  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
+  __ret_612; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
+  int32x4_t __s0_613 = __p0_613; \
+  int16x4_t __s1_613 = __p1_613; \
+  int16x8_t __s2_613 = __p2_613; \
+  int32x4_t __ret_613; \
+  __ret_613 = vqdmlal_s16(__s0_613, __s1_613, splat_laneq_s16(__s2_613, __p3_613)); \
+  __ret_613; \
 })
 #else
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
+  int32x4_t __s0_614 = __p0_614; \
+  int16x4_t __s1_614 = __p1_614; \
+  int16x8_t __s2_614 = __p2_614; \
+  int32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
+  int16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
+  int16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_614; \
+  __ret_614 = __noswap_vqdmlal_s16(__rev0_614, __rev1_614, __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \
+  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
+  __ret_614; \
 })
 #endif
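
The plain `_laneq` accumulate forms differ from `_high_lane*` only in taking the full 64-bit multiplicand directly and indexing a 128-bit lane vector; the `vqdmlsl_*` hunks that follow are the same shape with a saturating subtract. A sketch of the form defined just above, assuming AArch64:

    #include <arm_neon.h>

    /* acc[i] = sat32(acc[i] + sat32(2 * (int32_t)a[i] * (int32_t)b[7]))
     * for i = 0..3. */
    int32x4_t qdmlal_by_lane7(int32x4_t acc, int16x4_t a, int16x8_t b) {
      return vqdmlal_laneq_s16(acc, a, b, 7);
    }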
 
@@ -52463,98 +56263,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_lane_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \
+  int64x2_t __s0_615 = __p0_615; \
+  int32x4_t __s1_615 = __p1_615; \
+  int32x2_t __s2_615 = __p2_615; \
+  int64x2_t __ret_615; \
+  __ret_615 = vqdmlsl_s32(__s0_615, vget_high_s32(__s1_615), splat_lane_s32(__s2_615, __p3_615)); \
+  __ret_615; \
 })
 #else
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_lane_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \
+  int64x2_t __s0_616 = __p0_616; \
+  int32x4_t __s1_616 = __p1_616; \
+  int32x2_t __s2_616 = __p2_616; \
+  int64x2_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \
+  int32x4_t __rev1_616;  __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 3, 2, 1, 0); \
+  int32x2_t __rev2_616;  __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 1, 0); \
+  int64x2_t __ret_616; \
+  __ret_616 = __noswap_vqdmlsl_s32(__rev0_616, __noswap_vget_high_s32(__rev1_616), __noswap_splat_lane_s32(__rev2_616, __p3_616)); \
+  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \
+  __ret_616; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_lane_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \
+  int32x4_t __s0_617 = __p0_617; \
+  int16x8_t __s1_617 = __p1_617; \
+  int16x4_t __s2_617 = __p2_617; \
+  int32x4_t __ret_617; \
+  __ret_617 = vqdmlsl_s16(__s0_617, vget_high_s16(__s1_617), splat_lane_s16(__s2_617, __p3_617)); \
+  __ret_617; \
 })
 #else
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_lane_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \
+  int32x4_t __s0_618 = __p0_618; \
+  int16x8_t __s1_618 = __p1_618; \
+  int16x4_t __s2_618 = __p2_618; \
+  int32x4_t __rev0_618;  __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \
+  int16x8_t __rev1_618;  __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_618;  __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 3, 2, 1, 0); \
+  int32x4_t __ret_618; \
+  __ret_618 = __noswap_vqdmlsl_s16(__rev0_618, __noswap_vget_high_s16(__rev1_618), __noswap_splat_lane_s16(__rev2_618, __p3_618)); \
+  __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \
+  __ret_618; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_laneq_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \
+  int64x2_t __s0_619 = __p0_619; \
+  int32x4_t __s1_619 = __p1_619; \
+  int32x4_t __s2_619 = __p2_619; \
+  int64x2_t __ret_619; \
+  __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_laneq_s32(__s2_619, __p3_619)); \
+  __ret_619; \
 })
 #else
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_laneq_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \
+  int64x2_t __s0_620 = __p0_620; \
+  int32x4_t __s1_620 = __p1_620; \
+  int32x4_t __s2_620 = __p2_620; \
+  int64x2_t __rev0_620;  __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \
+  int32x4_t __rev1_620;  __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \
+  int32x4_t __rev2_620;  __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 3, 2, 1, 0); \
+  int64x2_t __ret_620; \
+  __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_laneq_s32(__rev2_620, __p3_620)); \
+  __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \
+  __ret_620; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_laneq_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \
+  int32x4_t __s0_621 = __p0_621; \
+  int16x8_t __s1_621 = __p1_621; \
+  int16x8_t __s2_621 = __p2_621; \
+  int32x4_t __ret_621; \
+  __ret_621 = vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_laneq_s16(__s2_621, __p3_621)); \
+  __ret_621; \
 })
 #else
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_laneq_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \
+  int32x4_t __s0_622 = __p0_622; \
+  int16x8_t __s1_622 = __p1_622; \
+  int16x8_t __s2_622 = __p2_622; \
+  int32x4_t __rev0_622;  __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \
+  int16x8_t __rev1_622;  __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_622;  __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_622; \
+  __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_laneq_s16(__rev2_622, __p3_622)); \
+  __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \
+  __ret_622; \
 })
 #endif
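
The `#else` branches above all follow the same big-endian compensation pattern: reverse the lanes of every vector input, run the `__noswap_` kernel (which assumes little-endian lane order), then reverse the result back. A sketch of that wrapper shape, under the assumption that a plain function pointer can stand in for the header's `__noswap_*` kernels:

typedef int int32x4 __attribute__((vector_size(16)));

static inline int32x4 reverse_s32x4(int32x4 v) {
  return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}

static inline int32x4 big_endian_wrap(int32x4 a, int32x4 b,
                                      int32x4 (*kernel)(int32x4, int32x4)) {
  /* Reverse into little-endian lane order, compute, reverse back. */
  return reverse_s32x4(kernel(reverse_s32x4(a), reverse_s32x4(b)));
}
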
 
@@ -52677,50 +56477,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \
+  int64x2_t __s0_623 = __p0_623; \
+  int32x2_t __s1_623 = __p1_623; \
+  int32x4_t __s2_623 = __p2_623; \
+  int64x2_t __ret_623; \
+  __ret_623 = vqdmlsl_s32(__s0_623, __s1_623, splat_laneq_s32(__s2_623, __p3_623)); \
+  __ret_623; \
 })
 #else
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \
+  int64x2_t __s0_624 = __p0_624; \
+  int32x2_t __s1_624 = __p1_624; \
+  int32x4_t __s2_624 = __p2_624; \
+  int64x2_t __rev0_624;  __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \
+  int32x2_t __rev1_624;  __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 1, 0); \
+  int32x4_t __rev2_624;  __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \
+  int64x2_t __ret_624; \
+  __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __rev1_624, __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \
+  __ret_624 = __builtin_shufflevector(__ret_624, __ret_624, 1, 0); \
+  __ret_624; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_laneq_s16(__p0_625, __p1_625, __p2_625, __p3_625) __extension__ ({ \
+  int32x4_t __s0_625 = __p0_625; \
+  int16x4_t __s1_625 = __p1_625; \
+  int16x8_t __s2_625 = __p2_625; \
+  int32x4_t __ret_625; \
+  __ret_625 = vqdmlsl_s16(__s0_625, __s1_625, splat_laneq_s16(__s2_625, __p3_625)); \
+  __ret_625; \
 })
 #else
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \
+  int32x4_t __s0_626 = __p0_626; \
+  int16x4_t __s1_626 = __p1_626; \
+  int16x8_t __s2_626 = __p2_626; \
+  int32x4_t __rev0_626;  __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \
+  int16x4_t __rev1_626;  __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 3, 2, 1, 0); \
+  int16x8_t __rev2_626;  __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_626; \
+  __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __rev1_626, __noswap_splat_laneq_s16(__rev2_626, __p3_626)); \
+  __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \
+  __ret_626; \
 })
 #endif
 
@@ -52739,7 +56539,7 @@
   int32x4_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
@@ -52749,7 +56549,7 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -52760,7 +56560,7 @@
   int16x8_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
@@ -52770,7 +56570,7 @@
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
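
These two hunks (and the matching `vqrdmulhq_lane_v` hunks further down) change only the final "magic" argument of the `__builtin_neon_*` call, from 34 to 2 and from 33 to 1. That argument is a NEON type flag: the low bits select the element type and a separate bit marks a 128-bit ("quad") vector, so the update appears to drop the quad bit when describing the 64-bit lane operand, presumably tracking how clang 11.0.5 types that operand. A hypothetical sketch of the layout these constants imply; the authoritative encoding is clang's NeonTypeFlags, and the names below are assumptions:

enum neon_type_flag {
  NEON_INT16 = 1,
  NEON_INT32 = 2,
  NEON_QUAD  = 32,  /* 33 == NEON_INT16 | NEON_QUAD, 34 == NEON_INT32 | NEON_QUAD */
};
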
@@ -52819,78 +56619,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
-  int32_t __s0_152 = __p0_152; \
-  int32x2_t __s1_152 = __p1_152; \
-  int32_t __ret_152; \
-  __ret_152 = vqdmulhs_s32(__s0_152, vget_lane_s32(__s1_152, __p2_152)); \
-  __ret_152; \
+#define vqdmulhs_lane_s32(__p0_627, __p1_627, __p2_627) __extension__ ({ \
+  int32_t __s0_627 = __p0_627; \
+  int32x2_t __s1_627 = __p1_627; \
+  int32_t __ret_627; \
+  __ret_627 = vqdmulhs_s32(__s0_627, vget_lane_s32(__s1_627, __p2_627)); \
+  __ret_627; \
 })
 #else
-#define vqdmulhs_lane_s32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
-  int32_t __s0_153 = __p0_153; \
-  int32x2_t __s1_153 = __p1_153; \
-  int32x2_t __rev1_153;  __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 1, 0); \
-  int32_t __ret_153; \
-  __ret_153 = vqdmulhs_s32(__s0_153, __noswap_vget_lane_s32(__rev1_153, __p2_153)); \
-  __ret_153; \
+#define vqdmulhs_lane_s32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
+  int32_t __s0_628 = __p0_628; \
+  int32x2_t __s1_628 = __p1_628; \
+  int32x2_t __rev1_628;  __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \
+  int32_t __ret_628; \
+  __ret_628 = vqdmulhs_s32(__s0_628, __noswap_vget_lane_s32(__rev1_628, __p2_628)); \
+  __ret_628; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_154, __p1_154, __p2_154) __extension__ ({ \
-  int16_t __s0_154 = __p0_154; \
-  int16x4_t __s1_154 = __p1_154; \
-  int16_t __ret_154; \
-  __ret_154 = vqdmulhh_s16(__s0_154, vget_lane_s16(__s1_154, __p2_154)); \
-  __ret_154; \
+#define vqdmulhh_lane_s16(__p0_629, __p1_629, __p2_629) __extension__ ({ \
+  int16_t __s0_629 = __p0_629; \
+  int16x4_t __s1_629 = __p1_629; \
+  int16_t __ret_629; \
+  __ret_629 = vqdmulhh_s16(__s0_629, vget_lane_s16(__s1_629, __p2_629)); \
+  __ret_629; \
 })
 #else
-#define vqdmulhh_lane_s16(__p0_155, __p1_155, __p2_155) __extension__ ({ \
-  int16_t __s0_155 = __p0_155; \
-  int16x4_t __s1_155 = __p1_155; \
-  int16x4_t __rev1_155;  __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \
-  int16_t __ret_155; \
-  __ret_155 = vqdmulhh_s16(__s0_155, __noswap_vget_lane_s16(__rev1_155, __p2_155)); \
-  __ret_155; \
+#define vqdmulhh_lane_s16(__p0_630, __p1_630, __p2_630) __extension__ ({ \
+  int16_t __s0_630 = __p0_630; \
+  int16x4_t __s1_630 = __p1_630; \
+  int16x4_t __rev1_630;  __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \
+  int16_t __ret_630; \
+  __ret_630 = vqdmulhh_s16(__s0_630, __noswap_vget_lane_s16(__rev1_630, __p2_630)); \
+  __ret_630; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
-  int32_t __s0_156 = __p0_156; \
-  int32x4_t __s1_156 = __p1_156; \
-  int32_t __ret_156; \
-  __ret_156 = vqdmulhs_s32(__s0_156, vgetq_lane_s32(__s1_156, __p2_156)); \
-  __ret_156; \
+#define vqdmulhs_laneq_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
+  int32_t __s0_631 = __p0_631; \
+  int32x4_t __s1_631 = __p1_631; \
+  int32_t __ret_631; \
+  __ret_631 = vqdmulhs_s32(__s0_631, vgetq_lane_s32(__s1_631, __p2_631)); \
+  __ret_631; \
 })
 #else
-#define vqdmulhs_laneq_s32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
-  int32_t __s0_157 = __p0_157; \
-  int32x4_t __s1_157 = __p1_157; \
-  int32x4_t __rev1_157;  __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
-  int32_t __ret_157; \
-  __ret_157 = vqdmulhs_s32(__s0_157, __noswap_vgetq_lane_s32(__rev1_157, __p2_157)); \
-  __ret_157; \
+#define vqdmulhs_laneq_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \
+  int32_t __s0_632 = __p0_632; \
+  int32x4_t __s1_632 = __p1_632; \
+  int32x4_t __rev1_632;  __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 3, 2, 1, 0); \
+  int32_t __ret_632; \
+  __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vgetq_lane_s32(__rev1_632, __p2_632)); \
+  __ret_632; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_158, __p1_158, __p2_158) __extension__ ({ \
-  int16_t __s0_158 = __p0_158; \
-  int16x8_t __s1_158 = __p1_158; \
-  int16_t __ret_158; \
-  __ret_158 = vqdmulhh_s16(__s0_158, vgetq_lane_s16(__s1_158, __p2_158)); \
-  __ret_158; \
+#define vqdmulhh_laneq_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
+  int16_t __s0_633 = __p0_633; \
+  int16x8_t __s1_633 = __p1_633; \
+  int16_t __ret_633; \
+  __ret_633 = vqdmulhh_s16(__s0_633, vgetq_lane_s16(__s1_633, __p2_633)); \
+  __ret_633; \
 })
 #else
-#define vqdmulhh_laneq_s16(__p0_159, __p1_159, __p2_159) __extension__ ({ \
-  int16_t __s0_159 = __p0_159; \
-  int16x8_t __s1_159 = __p1_159; \
-  int16x8_t __rev1_159;  __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_159; \
-  __ret_159 = vqdmulhh_s16(__s0_159, __noswap_vgetq_lane_s16(__rev1_159, __p2_159)); \
-  __ret_159; \
+#define vqdmulhh_laneq_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \
+  int16_t __s0_634 = __p0_634; \
+  int16x8_t __s1_634 = __p1_634; \
+  int16x8_t __rev1_634;  __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_634; \
+  __ret_634 = vqdmulhh_s16(__s0_634, __noswap_vgetq_lane_s16(__rev1_634, __p2_634)); \
+  __ret_634; \
 })
 #endif
 
@@ -53023,86 +56823,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_lane_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \
+  int32x4_t __s0_635 = __p0_635; \
+  int32x2_t __s1_635 = __p1_635; \
+  int64x2_t __ret_635; \
+  __ret_635 = vqdmull_s32(vget_high_s32(__s0_635), splat_lane_s32(__s1_635, __p2_635)); \
+  __ret_635; \
 })
 #else
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_high_lane_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
+  int32x4_t __s0_636 = __p0_636; \
+  int32x2_t __s1_636 = __p1_636; \
+  int32x4_t __rev0_636;  __rev0_636 = __builtin_shufflevector(__s0_636, __s0_636, 3, 2, 1, 0); \
+  int32x2_t __rev1_636;  __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 1, 0); \
+  int64x2_t __ret_636; \
+  __ret_636 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_636), __noswap_splat_lane_s32(__rev1_636, __p2_636)); \
+  __ret_636 = __builtin_shufflevector(__ret_636, __ret_636, 1, 0); \
+  __ret_636; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_lane_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \
+  int16x8_t __s0_637 = __p0_637; \
+  int16x4_t __s1_637 = __p1_637; \
+  int32x4_t __ret_637; \
+  __ret_637 = vqdmull_s16(vget_high_s16(__s0_637), splat_lane_s16(__s1_637, __p2_637)); \
+  __ret_637; \
 })
 #else
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_high_lane_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \
+  int16x8_t __s0_638 = __p0_638; \
+  int16x4_t __s1_638 = __p1_638; \
+  int16x8_t __rev0_638;  __rev0_638 = __builtin_shufflevector(__s0_638, __s0_638, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_638;  __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 3, 2, 1, 0); \
+  int32x4_t __ret_638; \
+  __ret_638 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_638), __noswap_splat_lane_s16(__rev1_638, __p2_638)); \
+  __ret_638 = __builtin_shufflevector(__ret_638, __ret_638, 3, 2, 1, 0); \
+  __ret_638; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
+  int32x4_t __s0_639 = __p0_639; \
+  int32x4_t __s1_639 = __p1_639; \
+  int64x2_t __ret_639; \
+  __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_laneq_s32(__s1_639, __p2_639)); \
+  __ret_639; \
 })
 #else
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_high_laneq_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \
+  int32x4_t __s0_640 = __p0_640; \
+  int32x4_t __s1_640 = __p1_640; \
+  int32x4_t __rev0_640;  __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \
+  int32x4_t __rev1_640;  __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 3, 2, 1, 0); \
+  int64x2_t __ret_640; \
+  __ret_640 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_laneq_s32(__rev1_640, __p2_640)); \
+  __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \
+  __ret_640; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
+  int16x8_t __s0_641 = __p0_641; \
+  int16x8_t __s1_641 = __p1_641; \
+  int32x4_t __ret_641; \
+  __ret_641 = vqdmull_s16(vget_high_s16(__s0_641), splat_laneq_s16(__s1_641, __p2_641)); \
+  __ret_641; \
 })
 #else
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_high_laneq_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \
+  int16x8_t __s0_642 = __p0_642; \
+  int16x8_t __s1_642 = __p1_642; \
+  int16x8_t __rev0_642;  __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_642;  __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_642; \
+  __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_laneq_s16(__rev1_642, __p2_642)); \
+  __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \
+  __ret_642; \
 })
 #endif
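
The `vqdmull_high_*` family above first extracts the upper half of the wide input with `vget_high_*`, then multiplies it against the splatted lane. Extracting a high half is itself just a narrowing shuffle: `__builtin_shufflevector` with two index arguments yields a two-lane result. A sketch with illustrative names (the header's `vget_high_s32`/`__noswap_vget_high_s32` do this job):

typedef int int32x4 __attribute__((vector_size(16)));
typedef int int32x2 __attribute__((vector_size(8)));

static inline int32x2 get_high_s32(int32x4 v) {
  /* Lanes 2 and 3 of the four-lane input become the two output lanes. */
  return __builtin_shufflevector(v, v, 2, 3);
}
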
 
@@ -53139,120 +56939,120 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_160, __p1_160, __p2_160) __extension__ ({ \
-  int32_t __s0_160 = __p0_160; \
-  int32x2_t __s1_160 = __p1_160; \
-  int64_t __ret_160; \
-  __ret_160 = vqdmulls_s32(__s0_160, vget_lane_s32(__s1_160, __p2_160)); \
-  __ret_160; \
+#define vqdmulls_lane_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
+  int32_t __s0_643 = __p0_643; \
+  int32x2_t __s1_643 = __p1_643; \
+  int64_t __ret_643; \
+  __ret_643 = vqdmulls_s32(__s0_643, vget_lane_s32(__s1_643, __p2_643)); \
+  __ret_643; \
 })
 #else
-#define vqdmulls_lane_s32(__p0_161, __p1_161, __p2_161) __extension__ ({ \
-  int32_t __s0_161 = __p0_161; \
-  int32x2_t __s1_161 = __p1_161; \
-  int32x2_t __rev1_161;  __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 1, 0); \
-  int64_t __ret_161; \
-  __ret_161 = vqdmulls_s32(__s0_161, __noswap_vget_lane_s32(__rev1_161, __p2_161)); \
-  __ret_161; \
+#define vqdmulls_lane_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \
+  int32_t __s0_644 = __p0_644; \
+  int32x2_t __s1_644 = __p1_644; \
+  int32x2_t __rev1_644;  __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 1, 0); \
+  int64_t __ret_644; \
+  __ret_644 = vqdmulls_s32(__s0_644, __noswap_vget_lane_s32(__rev1_644, __p2_644)); \
+  __ret_644; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_162, __p1_162, __p2_162) __extension__ ({ \
-  int16_t __s0_162 = __p0_162; \
-  int16x4_t __s1_162 = __p1_162; \
-  int32_t __ret_162; \
-  __ret_162 = vqdmullh_s16(__s0_162, vget_lane_s16(__s1_162, __p2_162)); \
-  __ret_162; \
+#define vqdmullh_lane_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
+  int16_t __s0_645 = __p0_645; \
+  int16x4_t __s1_645 = __p1_645; \
+  int32_t __ret_645; \
+  __ret_645 = vqdmullh_s16(__s0_645, vget_lane_s16(__s1_645, __p2_645)); \
+  __ret_645; \
 })
 #else
-#define vqdmullh_lane_s16(__p0_163, __p1_163, __p2_163) __extension__ ({ \
-  int16_t __s0_163 = __p0_163; \
-  int16x4_t __s1_163 = __p1_163; \
-  int16x4_t __rev1_163;  __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
-  int32_t __ret_163; \
-  __ret_163 = vqdmullh_s16(__s0_163, __noswap_vget_lane_s16(__rev1_163, __p2_163)); \
-  __ret_163; \
+#define vqdmullh_lane_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \
+  int16_t __s0_646 = __p0_646; \
+  int16x4_t __s1_646 = __p1_646; \
+  int16x4_t __rev1_646;  __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 3, 2, 1, 0); \
+  int32_t __ret_646; \
+  __ret_646 = vqdmullh_s16(__s0_646, __noswap_vget_lane_s16(__rev1_646, __p2_646)); \
+  __ret_646; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_164, __p1_164, __p2_164) __extension__ ({ \
-  int32_t __s0_164 = __p0_164; \
-  int32x4_t __s1_164 = __p1_164; \
-  int64_t __ret_164; \
-  __ret_164 = vqdmulls_s32(__s0_164, vgetq_lane_s32(__s1_164, __p2_164)); \
-  __ret_164; \
+#define vqdmulls_laneq_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
+  int32_t __s0_647 = __p0_647; \
+  int32x4_t __s1_647 = __p1_647; \
+  int64_t __ret_647; \
+  __ret_647 = vqdmulls_s32(__s0_647, vgetq_lane_s32(__s1_647, __p2_647)); \
+  __ret_647; \
 })
 #else
-#define vqdmulls_laneq_s32(__p0_165, __p1_165, __p2_165) __extension__ ({ \
-  int32_t __s0_165 = __p0_165; \
-  int32x4_t __s1_165 = __p1_165; \
-  int32x4_t __rev1_165;  __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \
-  int64_t __ret_165; \
-  __ret_165 = vqdmulls_s32(__s0_165, __noswap_vgetq_lane_s32(__rev1_165, __p2_165)); \
-  __ret_165; \
+#define vqdmulls_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
+  int32_t __s0_648 = __p0_648; \
+  int32x4_t __s1_648 = __p1_648; \
+  int32x4_t __rev1_648;  __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 3, 2, 1, 0); \
+  int64_t __ret_648; \
+  __ret_648 = vqdmulls_s32(__s0_648, __noswap_vgetq_lane_s32(__rev1_648, __p2_648)); \
+  __ret_648; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_166, __p1_166, __p2_166) __extension__ ({ \
-  int16_t __s0_166 = __p0_166; \
-  int16x8_t __s1_166 = __p1_166; \
-  int32_t __ret_166; \
-  __ret_166 = vqdmullh_s16(__s0_166, vgetq_lane_s16(__s1_166, __p2_166)); \
-  __ret_166; \
+#define vqdmullh_laneq_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \
+  int16_t __s0_649 = __p0_649; \
+  int16x8_t __s1_649 = __p1_649; \
+  int32_t __ret_649; \
+  __ret_649 = vqdmullh_s16(__s0_649, vgetq_lane_s16(__s1_649, __p2_649)); \
+  __ret_649; \
 })
 #else
-#define vqdmullh_laneq_s16(__p0_167, __p1_167, __p2_167) __extension__ ({ \
-  int16_t __s0_167 = __p0_167; \
-  int16x8_t __s1_167 = __p1_167; \
-  int16x8_t __rev1_167;  __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_167; \
-  __ret_167 = vqdmullh_s16(__s0_167, __noswap_vgetq_lane_s16(__rev1_167, __p2_167)); \
-  __ret_167; \
+#define vqdmullh_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
+  int16_t __s0_650 = __p0_650; \
+  int16x8_t __s1_650 = __p1_650; \
+  int16x8_t __rev1_650;  __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32_t __ret_650; \
+  __ret_650 = vqdmullh_s16(__s0_650, __noswap_vgetq_lane_s16(__rev1_650, __p2_650)); \
+  __ret_650; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \
+  int32x2_t __s0_651 = __p0_651; \
+  int32x4_t __s1_651 = __p1_651; \
+  int64x2_t __ret_651; \
+  __ret_651 = vqdmull_s32(__s0_651, splat_laneq_s32(__s1_651, __p2_651)); \
+  __ret_651; \
 })
 #else
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
+  int32x2_t __s0_652 = __p0_652; \
+  int32x4_t __s1_652 = __p1_652; \
+  int32x2_t __rev0_652;  __rev0_652 = __builtin_shufflevector(__s0_652, __s0_652, 1, 0); \
+  int32x4_t __rev1_652;  __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \
+  int64x2_t __ret_652; \
+  __ret_652 = __noswap_vqdmull_s32(__rev0_652, __noswap_splat_laneq_s32(__rev1_652, __p2_652)); \
+  __ret_652 = __builtin_shufflevector(__ret_652, __ret_652, 1, 0); \
+  __ret_652; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \
+  int16x4_t __s0_653 = __p0_653; \
+  int16x8_t __s1_653 = __p1_653; \
+  int32x4_t __ret_653; \
+  __ret_653 = vqdmull_s16(__s0_653, splat_laneq_s16(__s1_653, __p2_653)); \
+  __ret_653; \
 })
 #else
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
+  int16x4_t __s0_654 = __p0_654; \
+  int16x8_t __s1_654 = __p1_654; \
+  int16x4_t __rev0_654;  __rev0_654 = __builtin_shufflevector(__s0_654, __s0_654, 3, 2, 1, 0); \
+  int16x8_t __rev1_654;  __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_654; \
+  __ret_654 = __noswap_vqdmull_s16(__rev0_654, __noswap_splat_laneq_s16(__rev1_654, __p2_654)); \
+  __ret_654 = __builtin_shufflevector(__ret_654, __ret_654, 3, 2, 1, 0); \
+  __ret_654; \
 })
 #endif
 
@@ -53510,7 +57310,7 @@
   int32x4_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
@@ -53520,7 +57320,7 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -53531,7 +57331,7 @@
   int16x8_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
@@ -53541,7 +57341,7 @@
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -53590,78 +57390,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_168, __p1_168, __p2_168) __extension__ ({ \
-  int32_t __s0_168 = __p0_168; \
-  int32x2_t __s1_168 = __p1_168; \
-  int32_t __ret_168; \
-  __ret_168 = vqrdmulhs_s32(__s0_168, vget_lane_s32(__s1_168, __p2_168)); \
-  __ret_168; \
+#define vqrdmulhs_lane_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
+  int32_t __s0_655 = __p0_655; \
+  int32x2_t __s1_655 = __p1_655; \
+  int32_t __ret_655; \
+  __ret_655 = vqrdmulhs_s32(__s0_655, vget_lane_s32(__s1_655, __p2_655)); \
+  __ret_655; \
 })
 #else
-#define vqrdmulhs_lane_s32(__p0_169, __p1_169, __p2_169) __extension__ ({ \
-  int32_t __s0_169 = __p0_169; \
-  int32x2_t __s1_169 = __p1_169; \
-  int32x2_t __rev1_169;  __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 1, 0); \
-  int32_t __ret_169; \
-  __ret_169 = vqrdmulhs_s32(__s0_169, __noswap_vget_lane_s32(__rev1_169, __p2_169)); \
-  __ret_169; \
+#define vqrdmulhs_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
+  int32_t __s0_656 = __p0_656; \
+  int32x2_t __s1_656 = __p1_656; \
+  int32x2_t __rev1_656;  __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 1, 0); \
+  int32_t __ret_656; \
+  __ret_656 = vqrdmulhs_s32(__s0_656, __noswap_vget_lane_s32(__rev1_656, __p2_656)); \
+  __ret_656; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_170, __p1_170, __p2_170) __extension__ ({ \
-  int16_t __s0_170 = __p0_170; \
-  int16x4_t __s1_170 = __p1_170; \
-  int16_t __ret_170; \
-  __ret_170 = vqrdmulhh_s16(__s0_170, vget_lane_s16(__s1_170, __p2_170)); \
-  __ret_170; \
+#define vqrdmulhh_lane_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
+  int16_t __s0_657 = __p0_657; \
+  int16x4_t __s1_657 = __p1_657; \
+  int16_t __ret_657; \
+  __ret_657 = vqrdmulhh_s16(__s0_657, vget_lane_s16(__s1_657, __p2_657)); \
+  __ret_657; \
 })
 #else
-#define vqrdmulhh_lane_s16(__p0_171, __p1_171, __p2_171) __extension__ ({ \
-  int16_t __s0_171 = __p0_171; \
-  int16x4_t __s1_171 = __p1_171; \
-  int16x4_t __rev1_171;  __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
-  int16_t __ret_171; \
-  __ret_171 = vqrdmulhh_s16(__s0_171, __noswap_vget_lane_s16(__rev1_171, __p2_171)); \
-  __ret_171; \
+#define vqrdmulhh_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
+  int16_t __s0_658 = __p0_658; \
+  int16x4_t __s1_658 = __p1_658; \
+  int16x4_t __rev1_658;  __rev1_658 = __builtin_shufflevector(__s1_658, __s1_658, 3, 2, 1, 0); \
+  int16_t __ret_658; \
+  __ret_658 = vqrdmulhh_s16(__s0_658, __noswap_vget_lane_s16(__rev1_658, __p2_658)); \
+  __ret_658; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_172, __p1_172, __p2_172) __extension__ ({ \
-  int32_t __s0_172 = __p0_172; \
-  int32x4_t __s1_172 = __p1_172; \
-  int32_t __ret_172; \
-  __ret_172 = vqrdmulhs_s32(__s0_172, vgetq_lane_s32(__s1_172, __p2_172)); \
-  __ret_172; \
+#define vqrdmulhs_laneq_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
+  int32_t __s0_659 = __p0_659; \
+  int32x4_t __s1_659 = __p1_659; \
+  int32_t __ret_659; \
+  __ret_659 = vqrdmulhs_s32(__s0_659, vgetq_lane_s32(__s1_659, __p2_659)); \
+  __ret_659; \
 })
 #else
-#define vqrdmulhs_laneq_s32(__p0_173, __p1_173, __p2_173) __extension__ ({ \
-  int32_t __s0_173 = __p0_173; \
-  int32x4_t __s1_173 = __p1_173; \
-  int32x4_t __rev1_173;  __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 3, 2, 1, 0); \
-  int32_t __ret_173; \
-  __ret_173 = vqrdmulhs_s32(__s0_173, __noswap_vgetq_lane_s32(__rev1_173, __p2_173)); \
-  __ret_173; \
+#define vqrdmulhs_laneq_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
+  int32_t __s0_660 = __p0_660; \
+  int32x4_t __s1_660 = __p1_660; \
+  int32x4_t __rev1_660;  __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 3, 2, 1, 0); \
+  int32_t __ret_660; \
+  __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vgetq_lane_s32(__rev1_660, __p2_660)); \
+  __ret_660; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_174, __p1_174, __p2_174) __extension__ ({ \
-  int16_t __s0_174 = __p0_174; \
-  int16x8_t __s1_174 = __p1_174; \
-  int16_t __ret_174; \
-  __ret_174 = vqrdmulhh_s16(__s0_174, vgetq_lane_s16(__s1_174, __p2_174)); \
-  __ret_174; \
+#define vqrdmulhh_laneq_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \
+  int16_t __s0_661 = __p0_661; \
+  int16x8_t __s1_661 = __p1_661; \
+  int16_t __ret_661; \
+  __ret_661 = vqrdmulhh_s16(__s0_661, vgetq_lane_s16(__s1_661, __p2_661)); \
+  __ret_661; \
 })
 #else
-#define vqrdmulhh_laneq_s16(__p0_175, __p1_175, __p2_175) __extension__ ({ \
-  int16_t __s0_175 = __p0_175; \
-  int16x8_t __s1_175 = __p1_175; \
-  int16x8_t __rev1_175;  __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_175; \
-  __ret_175 = vqrdmulhh_s16(__s0_175, __noswap_vgetq_lane_s16(__rev1_175, __p2_175)); \
-  __ret_175; \
+#define vqrdmulhh_laneq_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
+  int16_t __s0_662 = __p0_662; \
+  int16x8_t __s1_662 = __p1_662; \
+  int16x8_t __rev1_662;  __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_662; \
+  __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vgetq_lane_s16(__rev1_662, __p2_662)); \
+  __ret_662; \
 })
 #endif
 
@@ -53790,128 +57590,128 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_176, __p1_176, __p2_176) __extension__ ({ \
-  uint16x4_t __s0_176 = __p0_176; \
-  uint32x4_t __s1_176 = __p1_176; \
-  uint16x8_t __ret_176; \
-  __ret_176 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_176), (uint16x4_t)(vqrshrn_n_u32(__s1_176, __p2_176)))); \
-  __ret_176; \
+#define vqrshrn_high_n_u32(__p0_663, __p1_663, __p2_663) __extension__ ({ \
+  uint16x4_t __s0_663 = __p0_663; \
+  uint32x4_t __s1_663 = __p1_663; \
+  uint16x8_t __ret_663; \
+  __ret_663 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_663), (uint16x4_t)(vqrshrn_n_u32(__s1_663, __p2_663)))); \
+  __ret_663; \
 })
 #else
-#define vqrshrn_high_n_u32(__p0_177, __p1_177, __p2_177) __extension__ ({ \
-  uint16x4_t __s0_177 = __p0_177; \
-  uint32x4_t __s1_177 = __p1_177; \
-  uint16x4_t __rev0_177;  __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 3, 2, 1, 0); \
-  uint32x4_t __rev1_177;  __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
-  uint16x8_t __ret_177; \
-  __ret_177 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_177), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_177, __p2_177)))); \
-  __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_177; \
+#define vqrshrn_high_n_u32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
+  uint16x4_t __s0_664 = __p0_664; \
+  uint32x4_t __s1_664 = __p1_664; \
+  uint16x4_t __rev0_664;  __rev0_664 = __builtin_shufflevector(__s0_664, __s0_664, 3, 2, 1, 0); \
+  uint32x4_t __rev1_664;  __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \
+  uint16x8_t __ret_664; \
+  __ret_664 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_664), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_664, __p2_664)))); \
+  __ret_664 = __builtin_shufflevector(__ret_664, __ret_664, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_664; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_178, __p1_178, __p2_178) __extension__ ({ \
-  uint32x2_t __s0_178 = __p0_178; \
-  uint64x2_t __s1_178 = __p1_178; \
-  uint32x4_t __ret_178; \
-  __ret_178 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_178), (uint32x2_t)(vqrshrn_n_u64(__s1_178, __p2_178)))); \
-  __ret_178; \
+#define vqrshrn_high_n_u64(__p0_665, __p1_665, __p2_665) __extension__ ({ \
+  uint32x2_t __s0_665 = __p0_665; \
+  uint64x2_t __s1_665 = __p1_665; \
+  uint32x4_t __ret_665; \
+  __ret_665 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_665), (uint32x2_t)(vqrshrn_n_u64(__s1_665, __p2_665)))); \
+  __ret_665; \
 })
 #else
-#define vqrshrn_high_n_u64(__p0_179, __p1_179, __p2_179) __extension__ ({ \
-  uint32x2_t __s0_179 = __p0_179; \
-  uint64x2_t __s1_179 = __p1_179; \
-  uint32x2_t __rev0_179;  __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 1, 0); \
-  uint64x2_t __rev1_179;  __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \
-  uint32x4_t __ret_179; \
-  __ret_179 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_179), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_179, __p2_179)))); \
-  __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \
-  __ret_179; \
+#define vqrshrn_high_n_u64(__p0_666, __p1_666, __p2_666) __extension__ ({ \
+  uint32x2_t __s0_666 = __p0_666; \
+  uint64x2_t __s1_666 = __p1_666; \
+  uint32x2_t __rev0_666;  __rev0_666 = __builtin_shufflevector(__s0_666, __s0_666, 1, 0); \
+  uint64x2_t __rev1_666;  __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 1, 0); \
+  uint32x4_t __ret_666; \
+  __ret_666 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_666), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_666, __p2_666)))); \
+  __ret_666 = __builtin_shufflevector(__ret_666, __ret_666, 3, 2, 1, 0); \
+  __ret_666; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
-  uint8x8_t __s0_180 = __p0_180; \
-  uint16x8_t __s1_180 = __p1_180; \
-  uint8x16_t __ret_180; \
-  __ret_180 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_180), (uint8x8_t)(vqrshrn_n_u16(__s1_180, __p2_180)))); \
-  __ret_180; \
+#define vqrshrn_high_n_u16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
+  uint8x8_t __s0_667 = __p0_667; \
+  uint16x8_t __s1_667 = __p1_667; \
+  uint8x16_t __ret_667; \
+  __ret_667 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_667), (uint8x8_t)(vqrshrn_n_u16(__s1_667, __p2_667)))); \
+  __ret_667; \
 })
 #else
-#define vqrshrn_high_n_u16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
-  uint8x8_t __s0_181 = __p0_181; \
-  uint16x8_t __s1_181 = __p1_181; \
-  uint8x8_t __rev0_181;  __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_181;  __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_181; \
-  __ret_181 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_181), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_181, __p2_181)))); \
-  __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_181; \
+#define vqrshrn_high_n_u16(__p0_668, __p1_668, __p2_668) __extension__ ({ \
+  uint8x8_t __s0_668 = __p0_668; \
+  uint16x8_t __s1_668 = __p1_668; \
+  uint8x8_t __rev0_668;  __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_668;  __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_668; \
+  __ret_668 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_668), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_668, __p2_668)))); \
+  __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_668; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
-  int16x4_t __s0_182 = __p0_182; \
-  int32x4_t __s1_182 = __p1_182; \
-  int16x8_t __ret_182; \
-  __ret_182 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_182), (int16x4_t)(vqrshrn_n_s32(__s1_182, __p2_182)))); \
-  __ret_182; \
+#define vqrshrn_high_n_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
+  int16x4_t __s0_669 = __p0_669; \
+  int32x4_t __s1_669 = __p1_669; \
+  int16x8_t __ret_669; \
+  __ret_669 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_669), (int16x4_t)(vqrshrn_n_s32(__s1_669, __p2_669)))); \
+  __ret_669; \
 })
 #else
-#define vqrshrn_high_n_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
-  int16x4_t __s0_183 = __p0_183; \
-  int32x4_t __s1_183 = __p1_183; \
-  int16x4_t __rev0_183;  __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
-  int32x4_t __rev1_183;  __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
-  int16x8_t __ret_183; \
-  __ret_183 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_183), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_183, __p2_183)))); \
-  __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_183; \
+#define vqrshrn_high_n_s32(__p0_670, __p1_670, __p2_670) __extension__ ({ \
+  int16x4_t __s0_670 = __p0_670; \
+  int32x4_t __s1_670 = __p1_670; \
+  int16x4_t __rev0_670;  __rev0_670 = __builtin_shufflevector(__s0_670, __s0_670, 3, 2, 1, 0); \
+  int32x4_t __rev1_670;  __rev1_670 = __builtin_shufflevector(__s1_670, __s1_670, 3, 2, 1, 0); \
+  int16x8_t __ret_670; \
+  __ret_670 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_670), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_670, __p2_670)))); \
+  __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_670; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
-  int32x2_t __s0_184 = __p0_184; \
-  int64x2_t __s1_184 = __p1_184; \
-  int32x4_t __ret_184; \
-  __ret_184 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_184), (int32x2_t)(vqrshrn_n_s64(__s1_184, __p2_184)))); \
-  __ret_184; \
+#define vqrshrn_high_n_s64(__p0_671, __p1_671, __p2_671) __extension__ ({ \
+  int32x2_t __s0_671 = __p0_671; \
+  int64x2_t __s1_671 = __p1_671; \
+  int32x4_t __ret_671; \
+  __ret_671 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_671), (int32x2_t)(vqrshrn_n_s64(__s1_671, __p2_671)))); \
+  __ret_671; \
 })
 #else
-#define vqrshrn_high_n_s64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
-  int32x2_t __s0_185 = __p0_185; \
-  int64x2_t __s1_185 = __p1_185; \
-  int32x2_t __rev0_185;  __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
-  int64x2_t __rev1_185;  __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
-  int32x4_t __ret_185; \
-  __ret_185 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_185), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_185, __p2_185)))); \
-  __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
-  __ret_185; \
+#define vqrshrn_high_n_s64(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  int32x2_t __s0_672 = __p0_672; \
+  int64x2_t __s1_672 = __p1_672; \
+  int32x2_t __rev0_672;  __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 1, 0); \
+  int64x2_t __rev1_672;  __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 1, 0); \
+  int32x4_t __ret_672; \
+  __ret_672 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_672), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_672, __p2_672)))); \
+  __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 3, 2, 1, 0); \
+  __ret_672; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
-  int8x8_t __s0_186 = __p0_186; \
-  int16x8_t __s1_186 = __p1_186; \
-  int8x16_t __ret_186; \
-  __ret_186 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_186), (int8x8_t)(vqrshrn_n_s16(__s1_186, __p2_186)))); \
-  __ret_186; \
+#define vqrshrn_high_n_s16(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  int8x8_t __s0_673 = __p0_673; \
+  int16x8_t __s1_673 = __p1_673; \
+  int8x16_t __ret_673; \
+  __ret_673 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_673), (int8x8_t)(vqrshrn_n_s16(__s1_673, __p2_673)))); \
+  __ret_673; \
 })
 #else
-#define vqrshrn_high_n_s16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
-  int8x8_t __s0_187 = __p0_187; \
-  int16x8_t __s1_187 = __p1_187; \
-  int8x8_t __rev0_187;  __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_187;  __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_187; \
-  __ret_187 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_187), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_187, __p2_187)))); \
-  __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_187; \
+#define vqrshrn_high_n_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  int8x8_t __s0_674 = __p0_674; \
+  int16x8_t __s1_674 = __p1_674; \
+  int8x8_t __rev0_674;  __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_674;  __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_674; \
+  __ret_674 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_674), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_674, __p2_674)))); \
+  __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_674; \
 })
 #endif
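
The `vqrshrn_high_n_*` macros above narrow-shift the wide operand and then stitch the result onto the existing narrow half with `vcombine_*`. A combine is likewise expressible as one widening shuffle over two same-width inputs; a sketch with illustrative names, again using clang's generic vector extension rather than the NEON types:

typedef unsigned int uint32x2 __attribute__((vector_size(8)));
typedef unsigned int uint32x4 __attribute__((vector_size(16)));

static inline uint32x4 combine_u32(uint32x2 lo, uint32x2 hi) {
  /* Indices 0..1 select from `lo`, 2..3 select from `hi`. */
  return __builtin_shufflevector(lo, hi, 0, 1, 2, 3);
}
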
 
@@ -53952,65 +57752,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
-  int16x4_t __s0_188 = __p0_188; \
-  int32x4_t __s1_188 = __p1_188; \
-  int16x8_t __ret_188; \
-  __ret_188 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_188), (int16x4_t)(vqrshrun_n_s32(__s1_188, __p2_188)))); \
-  __ret_188; \
+#define vqrshrun_high_n_s32(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  int16x4_t __s0_675 = __p0_675; \
+  int32x4_t __s1_675 = __p1_675; \
+  int16x8_t __ret_675; \
+  __ret_675 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_675), (int16x4_t)(vqrshrun_n_s32(__s1_675, __p2_675)))); \
+  __ret_675; \
 })
 #else
-#define vqrshrun_high_n_s32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
-  int16x4_t __s0_189 = __p0_189; \
-  int32x4_t __s1_189 = __p1_189; \
-  int16x4_t __rev0_189;  __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
-  int32x4_t __rev1_189;  __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
-  int16x8_t __ret_189; \
-  __ret_189 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_189), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_189, __p2_189)))); \
-  __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_189; \
+#define vqrshrun_high_n_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  int16x4_t __s0_676 = __p0_676; \
+  int32x4_t __s1_676 = __p1_676; \
+  int16x4_t __rev0_676;  __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 3, 2, 1, 0); \
+  int32x4_t __rev1_676;  __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 3, 2, 1, 0); \
+  int16x8_t __ret_676; \
+  __ret_676 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_676), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_676, __p2_676)))); \
+  __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_676; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
-  int32x2_t __s0_190 = __p0_190; \
-  int64x2_t __s1_190 = __p1_190; \
-  int32x4_t __ret_190; \
-  __ret_190 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_190), (int32x2_t)(vqrshrun_n_s64(__s1_190, __p2_190)))); \
-  __ret_190; \
+#define vqrshrun_high_n_s64(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  int32x2_t __s0_677 = __p0_677; \
+  int64x2_t __s1_677 = __p1_677; \
+  int32x4_t __ret_677; \
+  __ret_677 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_677), (int32x2_t)(vqrshrun_n_s64(__s1_677, __p2_677)))); \
+  __ret_677; \
 })
 #else
-#define vqrshrun_high_n_s64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
-  int32x2_t __s0_191 = __p0_191; \
-  int64x2_t __s1_191 = __p1_191; \
-  int32x2_t __rev0_191;  __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
-  int64x2_t __rev1_191;  __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
-  int32x4_t __ret_191; \
-  __ret_191 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_191), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_191, __p2_191)))); \
-  __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
-  __ret_191; \
+#define vqrshrun_high_n_s64(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  int32x2_t __s0_678 = __p0_678; \
+  int64x2_t __s1_678 = __p1_678; \
+  int32x2_t __rev0_678;  __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 1, 0); \
+  int64x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
+  int32x4_t __ret_678; \
+  __ret_678 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_678), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_678, __p2_678)))); \
+  __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 3, 2, 1, 0); \
+  __ret_678; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
-  int8x8_t __s0_192 = __p0_192; \
-  int16x8_t __s1_192 = __p1_192; \
-  int8x16_t __ret_192; \
-  __ret_192 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_192), (int8x8_t)(vqrshrun_n_s16(__s1_192, __p2_192)))); \
-  __ret_192; \
+#define vqrshrun_high_n_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  int8x8_t __s0_679 = __p0_679; \
+  int16x8_t __s1_679 = __p1_679; \
+  int8x16_t __ret_679; \
+  __ret_679 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_679), (int8x8_t)(vqrshrun_n_s16(__s1_679, __p2_679)))); \
+  __ret_679; \
 })
 #else
-#define vqrshrun_high_n_s16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
-  int8x8_t __s0_193 = __p0_193; \
-  int16x8_t __s1_193 = __p1_193; \
-  int8x8_t __rev0_193;  __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_193;  __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_193; \
-  __ret_193 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_193), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_193, __p2_193)))); \
-  __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_193; \
+#define vqrshrun_high_n_s16(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  int8x8_t __s0_680 = __p0_680; \
+  int16x8_t __s1_680 = __p1_680; \
+  int8x8_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_680;  __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_680; \
+  __ret_680 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_680), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_680, __p2_680)))); \
+  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_680; \
 })
 #endif
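
For orientation, the vqrshrun_high_n_* macros above perform a rounding shift right by a compile-time immediate, saturate each signed wide lane to the unsigned narrow range, and pack the narrowed lanes into the upper half of the result, with the first operand supplying the lower half. The #else branches reverse lane order before and after the operation because clang numbers vector lanes back to front on big-endian targets; callers observe identical semantics on either endianness. A minimal usage sketch, assuming an AArch64 target and the operand types declared by this header revision (the wrapper name is illustrative only):

#include <arm_neon.h>

/* Round-shift each 32-bit lane of 'wide' right by 8, saturate to the
 * unsigned 16-bit range, and place the four narrowed lanes in the high
 * half of the result; 'lo' provides the already-narrowed low half. */
static inline int16x8_t narrow_round_high(int16x4_t lo, int32x4_t wide) {
  return vqrshrun_high_n_s32(lo, wide, 8);
}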
 
@@ -54145,128 +57945,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
-  uint16x4_t __s0_194 = __p0_194; \
-  uint32x4_t __s1_194 = __p1_194; \
-  uint16x8_t __ret_194; \
-  __ret_194 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_194), (uint16x4_t)(vqshrn_n_u32(__s1_194, __p2_194)))); \
-  __ret_194; \
+#define vqshrn_high_n_u32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
+  uint16x4_t __s0_681 = __p0_681; \
+  uint32x4_t __s1_681 = __p1_681; \
+  uint16x8_t __ret_681; \
+  __ret_681 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_681), (uint16x4_t)(vqshrn_n_u32(__s1_681, __p2_681)))); \
+  __ret_681; \
 })
 #else
-#define vqshrn_high_n_u32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
-  uint16x4_t __s0_195 = __p0_195; \
-  uint32x4_t __s1_195 = __p1_195; \
-  uint16x4_t __rev0_195;  __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
-  uint32x4_t __rev1_195;  __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
-  uint16x8_t __ret_195; \
-  __ret_195 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_195), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_195, __p2_195)))); \
-  __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_195; \
+#define vqshrn_high_n_u32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
+  uint16x4_t __s0_682 = __p0_682; \
+  uint32x4_t __s1_682 = __p1_682; \
+  uint16x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
+  uint32x4_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 3, 2, 1, 0); \
+  uint16x8_t __ret_682; \
+  __ret_682 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_682), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_682, __p2_682)))); \
+  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_682; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
-  uint32x2_t __s0_196 = __p0_196; \
-  uint64x2_t __s1_196 = __p1_196; \
-  uint32x4_t __ret_196; \
-  __ret_196 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_196), (uint32x2_t)(vqshrn_n_u64(__s1_196, __p2_196)))); \
-  __ret_196; \
+#define vqshrn_high_n_u64(__p0_683, __p1_683, __p2_683) __extension__ ({ \
+  uint32x2_t __s0_683 = __p0_683; \
+  uint64x2_t __s1_683 = __p1_683; \
+  uint32x4_t __ret_683; \
+  __ret_683 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_683), (uint32x2_t)(vqshrn_n_u64(__s1_683, __p2_683)))); \
+  __ret_683; \
 })
 #else
-#define vqshrn_high_n_u64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
-  uint32x2_t __s0_197 = __p0_197; \
-  uint64x2_t __s1_197 = __p1_197; \
-  uint32x2_t __rev0_197;  __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
-  uint64x2_t __rev1_197;  __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
-  uint32x4_t __ret_197; \
-  __ret_197 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_197), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_197, __p2_197)))); \
-  __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
-  __ret_197; \
+#define vqshrn_high_n_u64(__p0_684, __p1_684, __p2_684) __extension__ ({ \
+  uint32x2_t __s0_684 = __p0_684; \
+  uint64x2_t __s1_684 = __p1_684; \
+  uint32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
+  uint64x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
+  uint32x4_t __ret_684; \
+  __ret_684 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_684), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_684, __p2_684)))); \
+  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 3, 2, 1, 0); \
+  __ret_684; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
-  uint8x8_t __s0_198 = __p0_198; \
-  uint16x8_t __s1_198 = __p1_198; \
-  uint8x16_t __ret_198; \
-  __ret_198 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_198), (uint8x8_t)(vqshrn_n_u16(__s1_198, __p2_198)))); \
-  __ret_198; \
+#define vqshrn_high_n_u16(__p0_685, __p1_685, __p2_685) __extension__ ({ \
+  uint8x8_t __s0_685 = __p0_685; \
+  uint16x8_t __s1_685 = __p1_685; \
+  uint8x16_t __ret_685; \
+  __ret_685 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_685), (uint8x8_t)(vqshrn_n_u16(__s1_685, __p2_685)))); \
+  __ret_685; \
 })
 #else
-#define vqshrn_high_n_u16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
-  uint8x8_t __s0_199 = __p0_199; \
-  uint16x8_t __s1_199 = __p1_199; \
-  uint8x8_t __rev0_199;  __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_199;  __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_199; \
-  __ret_199 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_199), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_199, __p2_199)))); \
-  __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_199; \
+#define vqshrn_high_n_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
+  uint8x8_t __s0_686 = __p0_686; \
+  uint16x8_t __s1_686 = __p1_686; \
+  uint8x8_t __rev0_686;  __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_686; \
+  __ret_686 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_686), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_686, __p2_686)))); \
+  __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_686; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
-  int16x4_t __s0_200 = __p0_200; \
-  int32x4_t __s1_200 = __p1_200; \
-  int16x8_t __ret_200; \
-  __ret_200 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_200), (int16x4_t)(vqshrn_n_s32(__s1_200, __p2_200)))); \
-  __ret_200; \
+#define vqshrn_high_n_s32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
+  int16x4_t __s0_687 = __p0_687; \
+  int32x4_t __s1_687 = __p1_687; \
+  int16x8_t __ret_687; \
+  __ret_687 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_687), (int16x4_t)(vqshrn_n_s32(__s1_687, __p2_687)))); \
+  __ret_687; \
 })
 #else
-#define vqshrn_high_n_s32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
-  int16x4_t __s0_201 = __p0_201; \
-  int32x4_t __s1_201 = __p1_201; \
-  int16x4_t __rev0_201;  __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
-  int32x4_t __rev1_201;  __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
-  int16x8_t __ret_201; \
-  __ret_201 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_201), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_201, __p2_201)))); \
-  __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_201; \
+#define vqshrn_high_n_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
+  int16x4_t __s0_688 = __p0_688; \
+  int32x4_t __s1_688 = __p1_688; \
+  int16x4_t __rev0_688;  __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 3, 2, 1, 0); \
+  int32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
+  int16x8_t __ret_688; \
+  __ret_688 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_688), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_688, __p2_688)))); \
+  __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_688; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
-  int32x2_t __s0_202 = __p0_202; \
-  int64x2_t __s1_202 = __p1_202; \
-  int32x4_t __ret_202; \
-  __ret_202 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_202), (int32x2_t)(vqshrn_n_s64(__s1_202, __p2_202)))); \
-  __ret_202; \
+#define vqshrn_high_n_s64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
+  int32x2_t __s0_689 = __p0_689; \
+  int64x2_t __s1_689 = __p1_689; \
+  int32x4_t __ret_689; \
+  __ret_689 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_689), (int32x2_t)(vqshrn_n_s64(__s1_689, __p2_689)))); \
+  __ret_689; \
 })
 #else
-#define vqshrn_high_n_s64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
-  int32x2_t __s0_203 = __p0_203; \
-  int64x2_t __s1_203 = __p1_203; \
-  int32x2_t __rev0_203;  __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
-  int64x2_t __rev1_203;  __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
-  int32x4_t __ret_203; \
-  __ret_203 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_203), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_203, __p2_203)))); \
-  __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
-  __ret_203; \
+#define vqshrn_high_n_s64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
+  int32x2_t __s0_690 = __p0_690; \
+  int64x2_t __s1_690 = __p1_690; \
+  int32x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
+  int64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
+  int32x4_t __ret_690; \
+  __ret_690 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_690), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_690, __p2_690)))); \
+  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 3, 2, 1, 0); \
+  __ret_690; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
-  int8x8_t __s0_204 = __p0_204; \
-  int16x8_t __s1_204 = __p1_204; \
-  int8x16_t __ret_204; \
-  __ret_204 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_204), (int8x8_t)(vqshrn_n_s16(__s1_204, __p2_204)))); \
-  __ret_204; \
+#define vqshrn_high_n_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+  int8x8_t __s0_691 = __p0_691; \
+  int16x8_t __s1_691 = __p1_691; \
+  int8x16_t __ret_691; \
+  __ret_691 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_691), (int8x8_t)(vqshrn_n_s16(__s1_691, __p2_691)))); \
+  __ret_691; \
 })
 #else
-#define vqshrn_high_n_s16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
-  int8x8_t __s0_205 = __p0_205; \
-  int16x8_t __s1_205 = __p1_205; \
-  int8x8_t __rev0_205;  __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_205;  __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_205; \
-  __ret_205 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_205), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_205, __p2_205)))); \
-  __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_205; \
+#define vqshrn_high_n_s16(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+  int8x8_t __s0_692 = __p0_692; \
+  int16x8_t __s1_692 = __p1_692; \
+  int8x8_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_692; \
+  __ret_692 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_692), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_692, __p2_692)))); \
+  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_692; \
 })
 #endif
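
The vqshrn_high_n_* group is the non-rounding counterpart: a plain shift right with saturation on the narrowing step. A short sketch of a typical fixed-point requantization use, under the same assumptions as above:

#include <arm_neon.h>

/* Shift each unsigned 32-bit lane right by 16, saturate to 16 bits, and
 * append the narrowed lanes after the low half held in 'lo'. */
static inline uint16x8_t requantize_high(uint16x4_t lo, uint32x4_t wide) {
  return vqshrn_high_n_u32(lo, wide, 16);
}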
 
@@ -54307,65 +58107,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
-  int16x4_t __s0_206 = __p0_206; \
-  int32x4_t __s1_206 = __p1_206; \
-  int16x8_t __ret_206; \
-  __ret_206 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_206), (int16x4_t)(vqshrun_n_s32(__s1_206, __p2_206)))); \
-  __ret_206; \
+#define vqshrun_high_n_s32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+  int16x4_t __s0_693 = __p0_693; \
+  int32x4_t __s1_693 = __p1_693; \
+  int16x8_t __ret_693; \
+  __ret_693 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_693), (int16x4_t)(vqshrun_n_s32(__s1_693, __p2_693)))); \
+  __ret_693; \
 })
 #else
-#define vqshrun_high_n_s32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
-  int16x4_t __s0_207 = __p0_207; \
-  int32x4_t __s1_207 = __p1_207; \
-  int16x4_t __rev0_207;  __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
-  int32x4_t __rev1_207;  __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
-  int16x8_t __ret_207; \
-  __ret_207 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_207), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_207, __p2_207)))); \
-  __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_207; \
+#define vqshrun_high_n_s32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+  int16x4_t __s0_694 = __p0_694; \
+  int32x4_t __s1_694 = __p1_694; \
+  int16x4_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 3, 2, 1, 0); \
+  int32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
+  int16x8_t __ret_694; \
+  __ret_694 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_694), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_694, __p2_694)))); \
+  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_694; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
-  int32x2_t __s0_208 = __p0_208; \
-  int64x2_t __s1_208 = __p1_208; \
-  int32x4_t __ret_208; \
-  __ret_208 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_208), (int32x2_t)(vqshrun_n_s64(__s1_208, __p2_208)))); \
-  __ret_208; \
+#define vqshrun_high_n_s64(__p0_695, __p1_695, __p2_695) __extension__ ({ \
+  int32x2_t __s0_695 = __p0_695; \
+  int64x2_t __s1_695 = __p1_695; \
+  int32x4_t __ret_695; \
+  __ret_695 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_695), (int32x2_t)(vqshrun_n_s64(__s1_695, __p2_695)))); \
+  __ret_695; \
 })
 #else
-#define vqshrun_high_n_s64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
-  int32x2_t __s0_209 = __p0_209; \
-  int64x2_t __s1_209 = __p1_209; \
-  int32x2_t __rev0_209;  __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
-  int64x2_t __rev1_209;  __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
-  int32x4_t __ret_209; \
-  __ret_209 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_209), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_209, __p2_209)))); \
-  __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
-  __ret_209; \
+#define vqshrun_high_n_s64(__p0_696, __p1_696, __p2_696) __extension__ ({ \
+  int32x2_t __s0_696 = __p0_696; \
+  int64x2_t __s1_696 = __p1_696; \
+  int32x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
+  int64x2_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 1, 0); \
+  int32x4_t __ret_696; \
+  __ret_696 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_696), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_696, __p2_696)))); \
+  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 3, 2, 1, 0); \
+  __ret_696; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
-  int8x8_t __s0_210 = __p0_210; \
-  int16x8_t __s1_210 = __p1_210; \
-  int8x16_t __ret_210; \
-  __ret_210 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_210), (int8x8_t)(vqshrun_n_s16(__s1_210, __p2_210)))); \
-  __ret_210; \
+#define vqshrun_high_n_s16(__p0_697, __p1_697, __p2_697) __extension__ ({ \
+  int8x8_t __s0_697 = __p0_697; \
+  int16x8_t __s1_697 = __p1_697; \
+  int8x16_t __ret_697; \
+  __ret_697 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_697), (int8x8_t)(vqshrun_n_s16(__s1_697, __p2_697)))); \
+  __ret_697; \
 })
 #else
-#define vqshrun_high_n_s16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
-  int8x8_t __s0_211 = __p0_211; \
-  int16x8_t __s1_211 = __p1_211; \
-  int8x8_t __rev0_211;  __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_211;  __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_211; \
-  __ret_211 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_211), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_211, __p2_211)))); \
-  __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_211; \
+#define vqshrun_high_n_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \
+  int8x8_t __s0_698 = __p0_698; \
+  int16x8_t __s1_698 = __p1_698; \
+  int8x8_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_698; \
+  __ret_698 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_698), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_698, __p2_698)))); \
+  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_698; \
 })
 #endif
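
vqshrun_high_n_* performs the signed-to-unsigned variant without rounding: lanes that shift to a negative value clamp to zero, and lanes above the unsigned 16-bit maximum clamp to 0xFFFF. Note that this header revision types the result as int16x8_t rather than the uint16x8_t that the ACLE specification uses; the sketch below follows the header:

#include <arm_neon.h>

/* Clamp signed 32-bit values into the unsigned 16-bit range after a
 * shift right by 4; negative inputs become 0. */
static inline int16x8_t clamp_narrow_high(int16x4_t lo, int32x4_t wide) {
  return vqshrun_high_n_s32(lo, wide, 4);
}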
 
@@ -55675,128 +59475,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
-  uint16x4_t __s0_212 = __p0_212; \
-  uint32x4_t __s1_212 = __p1_212; \
-  uint16x8_t __ret_212; \
-  __ret_212 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_212), (uint16x4_t)(vrshrn_n_u32(__s1_212, __p2_212)))); \
-  __ret_212; \
+#define vrshrn_high_n_u32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
+  uint16x4_t __s0_699 = __p0_699; \
+  uint32x4_t __s1_699 = __p1_699; \
+  uint16x8_t __ret_699; \
+  __ret_699 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_699), (uint16x4_t)(vrshrn_n_u32(__s1_699, __p2_699)))); \
+  __ret_699; \
 })
 #else
-#define vrshrn_high_n_u32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
-  uint16x4_t __s0_213 = __p0_213; \
-  uint32x4_t __s1_213 = __p1_213; \
-  uint16x4_t __rev0_213;  __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
-  uint32x4_t __rev1_213;  __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
-  uint16x8_t __ret_213; \
-  __ret_213 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_213), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_213, __p2_213)))); \
-  __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_213; \
+#define vrshrn_high_n_u32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
+  uint16x4_t __s0_700 = __p0_700; \
+  uint32x4_t __s1_700 = __p1_700; \
+  uint16x4_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 3, 2, 1, 0); \
+  uint32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
+  uint16x8_t __ret_700; \
+  __ret_700 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_700), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_700, __p2_700)))); \
+  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_700; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
-  uint32x2_t __s0_214 = __p0_214; \
-  uint64x2_t __s1_214 = __p1_214; \
-  uint32x4_t __ret_214; \
-  __ret_214 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_214), (uint32x2_t)(vrshrn_n_u64(__s1_214, __p2_214)))); \
-  __ret_214; \
+#define vrshrn_high_n_u64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
+  uint32x2_t __s0_701 = __p0_701; \
+  uint64x2_t __s1_701 = __p1_701; \
+  uint32x4_t __ret_701; \
+  __ret_701 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_701), (uint32x2_t)(vrshrn_n_u64(__s1_701, __p2_701)))); \
+  __ret_701; \
 })
 #else
-#define vrshrn_high_n_u64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
-  uint32x2_t __s0_215 = __p0_215; \
-  uint64x2_t __s1_215 = __p1_215; \
-  uint32x2_t __rev0_215;  __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
-  uint64x2_t __rev1_215;  __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
-  uint32x4_t __ret_215; \
-  __ret_215 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_215), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_215, __p2_215)))); \
-  __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
-  __ret_215; \
+#define vrshrn_high_n_u64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
+  uint32x2_t __s0_702 = __p0_702; \
+  uint64x2_t __s1_702 = __p1_702; \
+  uint32x2_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 1, 0); \
+  uint64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
+  uint32x4_t __ret_702; \
+  __ret_702 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_702), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_702, __p2_702)))); \
+  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
+  __ret_702; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
-  uint8x8_t __s0_216 = __p0_216; \
-  uint16x8_t __s1_216 = __p1_216; \
-  uint8x16_t __ret_216; \
-  __ret_216 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_216), (uint8x8_t)(vrshrn_n_u16(__s1_216, __p2_216)))); \
-  __ret_216; \
+#define vrshrn_high_n_u16(__p0_703, __p1_703, __p2_703) __extension__ ({ \
+  uint8x8_t __s0_703 = __p0_703; \
+  uint16x8_t __s1_703 = __p1_703; \
+  uint8x16_t __ret_703; \
+  __ret_703 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_703), (uint8x8_t)(vrshrn_n_u16(__s1_703, __p2_703)))); \
+  __ret_703; \
 })
 #else
-#define vrshrn_high_n_u16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
-  uint8x8_t __s0_217 = __p0_217; \
-  uint16x8_t __s1_217 = __p1_217; \
-  uint8x8_t __rev0_217;  __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_217;  __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_217; \
-  __ret_217 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_217), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_217, __p2_217)))); \
-  __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_217; \
+#define vrshrn_high_n_u16(__p0_704, __p1_704, __p2_704) __extension__ ({ \
+  uint8x8_t __s0_704 = __p0_704; \
+  uint16x8_t __s1_704 = __p1_704; \
+  uint8x8_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_704; \
+  __ret_704 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_704), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_704, __p2_704)))); \
+  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_704; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
-  int16x4_t __s0_218 = __p0_218; \
-  int32x4_t __s1_218 = __p1_218; \
-  int16x8_t __ret_218; \
-  __ret_218 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_218), (int16x4_t)(vrshrn_n_s32(__s1_218, __p2_218)))); \
-  __ret_218; \
+#define vrshrn_high_n_s32(__p0_705, __p1_705, __p2_705) __extension__ ({ \
+  int16x4_t __s0_705 = __p0_705; \
+  int32x4_t __s1_705 = __p1_705; \
+  int16x8_t __ret_705; \
+  __ret_705 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_705), (int16x4_t)(vrshrn_n_s32(__s1_705, __p2_705)))); \
+  __ret_705; \
 })
 #else
-#define vrshrn_high_n_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
-  int16x4_t __s0_219 = __p0_219; \
-  int32x4_t __s1_219 = __p1_219; \
-  int16x4_t __rev0_219;  __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 3, 2, 1, 0); \
-  int32x4_t __rev1_219;  __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 3, 2, 1, 0); \
-  int16x8_t __ret_219; \
-  __ret_219 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_219), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_219, __p2_219)))); \
-  __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_219; \
+#define vrshrn_high_n_s32(__p0_706, __p1_706, __p2_706) __extension__ ({ \
+  int16x4_t __s0_706 = __p0_706; \
+  int32x4_t __s1_706 = __p1_706; \
+  int16x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
+  int32x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
+  int16x8_t __ret_706; \
+  __ret_706 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_706), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_706, __p2_706)))); \
+  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_706; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_220, __p1_220, __p2_220) __extension__ ({ \
-  int32x2_t __s0_220 = __p0_220; \
-  int64x2_t __s1_220 = __p1_220; \
-  int32x4_t __ret_220; \
-  __ret_220 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_220), (int32x2_t)(vrshrn_n_s64(__s1_220, __p2_220)))); \
-  __ret_220; \
+#define vrshrn_high_n_s64(__p0_707, __p1_707, __p2_707) __extension__ ({ \
+  int32x2_t __s0_707 = __p0_707; \
+  int64x2_t __s1_707 = __p1_707; \
+  int32x4_t __ret_707; \
+  __ret_707 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_707), (int32x2_t)(vrshrn_n_s64(__s1_707, __p2_707)))); \
+  __ret_707; \
 })
 #else
-#define vrshrn_high_n_s64(__p0_221, __p1_221, __p2_221) __extension__ ({ \
-  int32x2_t __s0_221 = __p0_221; \
-  int64x2_t __s1_221 = __p1_221; \
-  int32x2_t __rev0_221;  __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 1, 0); \
-  int64x2_t __rev1_221;  __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 1, 0); \
-  int32x4_t __ret_221; \
-  __ret_221 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_221), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_221, __p2_221)))); \
-  __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
-  __ret_221; \
+#define vrshrn_high_n_s64(__p0_708, __p1_708, __p2_708) __extension__ ({ \
+  int32x2_t __s0_708 = __p0_708; \
+  int64x2_t __s1_708 = __p1_708; \
+  int32x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
+  int64x2_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 1, 0); \
+  int32x4_t __ret_708; \
+  __ret_708 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_708), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_708, __p2_708)))); \
+  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \
+  __ret_708; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
-  int8x8_t __s0_222 = __p0_222; \
-  int16x8_t __s1_222 = __p1_222; \
-  int8x16_t __ret_222; \
-  __ret_222 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_222), (int8x8_t)(vrshrn_n_s16(__s1_222, __p2_222)))); \
-  __ret_222; \
+#define vrshrn_high_n_s16(__p0_709, __p1_709, __p2_709) __extension__ ({ \
+  int8x8_t __s0_709 = __p0_709; \
+  int16x8_t __s1_709 = __p1_709; \
+  int8x16_t __ret_709; \
+  __ret_709 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_709), (int8x8_t)(vrshrn_n_s16(__s1_709, __p2_709)))); \
+  __ret_709; \
 })
 #else
-#define vrshrn_high_n_s16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
-  int8x8_t __s0_223 = __p0_223; \
-  int16x8_t __s1_223 = __p1_223; \
-  int8x8_t __rev0_223;  __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_223;  __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_223; \
-  __ret_223 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_223), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_223, __p2_223)))); \
-  __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_223; \
+#define vrshrn_high_n_s16(__p0_710, __p1_710, __p2_710) __extension__ ({ \
+  int8x8_t __s0_710 = __p0_710; \
+  int16x8_t __s1_710 = __p1_710; \
+  int8x8_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_710; \
+  __ret_710 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_710), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_710, __p2_710)))); \
+  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_710; \
 })
 #endif
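
vrshrn_high_n_* rounds but does not saturate: the operation adds 2^(n-1) before discarding the low bits, then simply truncates to the narrow type. A sketch:

#include <arm_neon.h>

/* Round-to-nearest narrowing: adds 1 << 15 to each 32-bit lane, shifts
 * right by 16, and keeps the low 16 bits; overflow wraps rather than
 * saturating. */
static inline uint16x8_t round_narrow_high(uint16x4_t lo, uint32x4_t wide) {
  return vrshrn_high_n_u32(lo, wide, 16);
}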
 
@@ -56076,110 +59876,110 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_224, __p1_224) __extension__ ({ \
-  uint8x16_t __s0_224 = __p0_224; \
-  uint16x8_t __ret_224; \
-  __ret_224 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_224), __p1_224)); \
-  __ret_224; \
+#define vshll_high_n_u8(__p0_711, __p1_711) __extension__ ({ \
+  uint8x16_t __s0_711 = __p0_711; \
+  uint16x8_t __ret_711; \
+  __ret_711 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_711), __p1_711)); \
+  __ret_711; \
 })
 #else
-#define vshll_high_n_u8(__p0_225, __p1_225) __extension__ ({ \
-  uint8x16_t __s0_225 = __p0_225; \
-  uint8x16_t __rev0_225;  __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_225; \
-  __ret_225 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_225), __p1_225)); \
-  __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_225; \
+#define vshll_high_n_u8(__p0_712, __p1_712) __extension__ ({ \
+  uint8x16_t __s0_712 = __p0_712; \
+  uint8x16_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_712; \
+  __ret_712 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_712), __p1_712)); \
+  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_712; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_226, __p1_226) __extension__ ({ \
-  uint32x4_t __s0_226 = __p0_226; \
-  uint64x2_t __ret_226; \
-  __ret_226 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_226), __p1_226)); \
-  __ret_226; \
+#define vshll_high_n_u32(__p0_713, __p1_713) __extension__ ({ \
+  uint32x4_t __s0_713 = __p0_713; \
+  uint64x2_t __ret_713; \
+  __ret_713 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_713), __p1_713)); \
+  __ret_713; \
 })
 #else
-#define vshll_high_n_u32(__p0_227, __p1_227) __extension__ ({ \
-  uint32x4_t __s0_227 = __p0_227; \
-  uint32x4_t __rev0_227;  __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 3, 2, 1, 0); \
-  uint64x2_t __ret_227; \
-  __ret_227 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_227), __p1_227)); \
-  __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \
-  __ret_227; \
+#define vshll_high_n_u32(__p0_714, __p1_714) __extension__ ({ \
+  uint32x4_t __s0_714 = __p0_714; \
+  uint32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
+  uint64x2_t __ret_714; \
+  __ret_714 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_714), __p1_714)); \
+  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 1, 0); \
+  __ret_714; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_228, __p1_228) __extension__ ({ \
-  uint16x8_t __s0_228 = __p0_228; \
-  uint32x4_t __ret_228; \
-  __ret_228 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_228), __p1_228)); \
-  __ret_228; \
+#define vshll_high_n_u16(__p0_715, __p1_715) __extension__ ({ \
+  uint16x8_t __s0_715 = __p0_715; \
+  uint32x4_t __ret_715; \
+  __ret_715 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_715), __p1_715)); \
+  __ret_715; \
 })
 #else
-#define vshll_high_n_u16(__p0_229, __p1_229) __extension__ ({ \
-  uint16x8_t __s0_229 = __p0_229; \
-  uint16x8_t __rev0_229;  __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_229; \
-  __ret_229 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_229), __p1_229)); \
-  __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \
-  __ret_229; \
+#define vshll_high_n_u16(__p0_716, __p1_716) __extension__ ({ \
+  uint16x8_t __s0_716 = __p0_716; \
+  uint16x8_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_716; \
+  __ret_716 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_716), __p1_716)); \
+  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 3, 2, 1, 0); \
+  __ret_716; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_230, __p1_230) __extension__ ({ \
-  int8x16_t __s0_230 = __p0_230; \
-  int16x8_t __ret_230; \
-  __ret_230 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_230), __p1_230)); \
-  __ret_230; \
+#define vshll_high_n_s8(__p0_717, __p1_717) __extension__ ({ \
+  int8x16_t __s0_717 = __p0_717; \
+  int16x8_t __ret_717; \
+  __ret_717 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_717), __p1_717)); \
+  __ret_717; \
 })
 #else
-#define vshll_high_n_s8(__p0_231, __p1_231) __extension__ ({ \
-  int8x16_t __s0_231 = __p0_231; \
-  int8x16_t __rev0_231;  __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_231; \
-  __ret_231 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_231), __p1_231)); \
-  __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_231; \
+#define vshll_high_n_s8(__p0_718, __p1_718) __extension__ ({ \
+  int8x16_t __s0_718 = __p0_718; \
+  int8x16_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_718; \
+  __ret_718 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_718), __p1_718)); \
+  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_718; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_232, __p1_232) __extension__ ({ \
-  int32x4_t __s0_232 = __p0_232; \
-  int64x2_t __ret_232; \
-  __ret_232 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_232), __p1_232)); \
-  __ret_232; \
+#define vshll_high_n_s32(__p0_719, __p1_719) __extension__ ({ \
+  int32x4_t __s0_719 = __p0_719; \
+  int64x2_t __ret_719; \
+  __ret_719 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_719), __p1_719)); \
+  __ret_719; \
 })
 #else
-#define vshll_high_n_s32(__p0_233, __p1_233) __extension__ ({ \
-  int32x4_t __s0_233 = __p0_233; \
-  int32x4_t __rev0_233;  __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \
-  int64x2_t __ret_233; \
-  __ret_233 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_233), __p1_233)); \
-  __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 1, 0); \
-  __ret_233; \
+#define vshll_high_n_s32(__p0_720, __p1_720) __extension__ ({ \
+  int32x4_t __s0_720 = __p0_720; \
+  int32x4_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 3, 2, 1, 0); \
+  int64x2_t __ret_720; \
+  __ret_720 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_720), __p1_720)); \
+  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
+  __ret_720; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_234, __p1_234) __extension__ ({ \
-  int16x8_t __s0_234 = __p0_234; \
-  int32x4_t __ret_234; \
-  __ret_234 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_234), __p1_234)); \
-  __ret_234; \
+#define vshll_high_n_s16(__p0_721, __p1_721) __extension__ ({ \
+  int16x8_t __s0_721 = __p0_721; \
+  int32x4_t __ret_721; \
+  __ret_721 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_721), __p1_721)); \
+  __ret_721; \
 })
 #else
-#define vshll_high_n_s16(__p0_235, __p1_235) __extension__ ({ \
-  int16x8_t __s0_235 = __p0_235; \
-  int16x8_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_235; \
-  __ret_235 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_235), __p1_235)); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 3, 2, 1, 0); \
-  __ret_235; \
+#define vshll_high_n_s16(__p0_722, __p1_722) __extension__ ({ \
+  int16x8_t __s0_722 = __p0_722; \
+  int16x8_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_722; \
+  __ret_722 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_722), __p1_722)); \
+  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
+  __ret_722; \
 })
 #endif
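
The vshll_high_n_* macros widen instead of narrowing: they take the upper half of the input lanes, extend each to double width, and shift left by the immediate, fusing what would otherwise be a vget_high followed by vshll_n, exactly as the expansions above show. A sketch under the same assumptions:

#include <arm_neon.h>

/* Widen the top eight bytes of 'v' to 16 bits each and multiply by 16
 * via a left shift of 4. */
static inline uint16x8_t widen_high(uint8x16_t v) {
  return vshll_high_n_u8(v, 4);
}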
 
@@ -56196,128 +59996,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_236, __p1_236, __p2_236) __extension__ ({ \
-  uint16x4_t __s0_236 = __p0_236; \
-  uint32x4_t __s1_236 = __p1_236; \
-  uint16x8_t __ret_236; \
-  __ret_236 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_236), (uint16x4_t)(vshrn_n_u32(__s1_236, __p2_236)))); \
-  __ret_236; \
+#define vshrn_high_n_u32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
+  uint16x4_t __s0_723 = __p0_723; \
+  uint32x4_t __s1_723 = __p1_723; \
+  uint16x8_t __ret_723; \
+  __ret_723 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_723), (uint16x4_t)(vshrn_n_u32(__s1_723, __p2_723)))); \
+  __ret_723; \
 })
 #else
-#define vshrn_high_n_u32(__p0_237, __p1_237, __p2_237) __extension__ ({ \
-  uint16x4_t __s0_237 = __p0_237; \
-  uint32x4_t __s1_237 = __p1_237; \
-  uint16x4_t __rev0_237;  __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \
-  uint32x4_t __rev1_237;  __rev1_237 = __builtin_shufflevector(__s1_237, __s1_237, 3, 2, 1, 0); \
-  uint16x8_t __ret_237; \
-  __ret_237 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_237), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_237, __p2_237)))); \
-  __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_237; \
+#define vshrn_high_n_u32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
+  uint16x4_t __s0_724 = __p0_724; \
+  uint32x4_t __s1_724 = __p1_724; \
+  uint16x4_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \
+  uint32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
+  uint16x8_t __ret_724; \
+  __ret_724 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_724), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_724, __p2_724)))); \
+  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_724; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_238, __p1_238, __p2_238) __extension__ ({ \
-  uint32x2_t __s0_238 = __p0_238; \
-  uint64x2_t __s1_238 = __p1_238; \
-  uint32x4_t __ret_238; \
-  __ret_238 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_238), (uint32x2_t)(vshrn_n_u64(__s1_238, __p2_238)))); \
-  __ret_238; \
+#define vshrn_high_n_u64(__p0_725, __p1_725, __p2_725) __extension__ ({ \
+  uint32x2_t __s0_725 = __p0_725; \
+  uint64x2_t __s1_725 = __p1_725; \
+  uint32x4_t __ret_725; \
+  __ret_725 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_725), (uint32x2_t)(vshrn_n_u64(__s1_725, __p2_725)))); \
+  __ret_725; \
 })
 #else
-#define vshrn_high_n_u64(__p0_239, __p1_239, __p2_239) __extension__ ({ \
-  uint32x2_t __s0_239 = __p0_239; \
-  uint64x2_t __s1_239 = __p1_239; \
-  uint32x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  uint64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  uint32x4_t __ret_239; \
-  __ret_239 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_239), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_239, __p2_239)))); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \
-  __ret_239; \
+#define vshrn_high_n_u64(__p0_726, __p1_726, __p2_726) __extension__ ({ \
+  uint32x2_t __s0_726 = __p0_726; \
+  uint64x2_t __s1_726 = __p1_726; \
+  uint32x2_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 1, 0); \
+  uint64x2_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 1, 0); \
+  uint32x4_t __ret_726; \
+  __ret_726 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_726), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_726, __p2_726)))); \
+  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
+  __ret_726; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_240, __p1_240, __p2_240) __extension__ ({ \
-  uint8x8_t __s0_240 = __p0_240; \
-  uint16x8_t __s1_240 = __p1_240; \
-  uint8x16_t __ret_240; \
-  __ret_240 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_240), (uint8x8_t)(vshrn_n_u16(__s1_240, __p2_240)))); \
-  __ret_240; \
+#define vshrn_high_n_u16(__p0_727, __p1_727, __p2_727) __extension__ ({ \
+  uint8x8_t __s0_727 = __p0_727; \
+  uint16x8_t __s1_727 = __p1_727; \
+  uint8x16_t __ret_727; \
+  __ret_727 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_727), (uint8x8_t)(vshrn_n_u16(__s1_727, __p2_727)))); \
+  __ret_727; \
 })
 #else
-#define vshrn_high_n_u16(__p0_241, __p1_241, __p2_241) __extension__ ({ \
-  uint8x8_t __s0_241 = __p0_241; \
-  uint16x8_t __s1_241 = __p1_241; \
-  uint8x8_t __rev0_241;  __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_241;  __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_241; \
-  __ret_241 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_241), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_241, __p2_241)))); \
-  __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_241; \
+#define vshrn_high_n_u16(__p0_728, __p1_728, __p2_728) __extension__ ({ \
+  uint8x8_t __s0_728 = __p0_728; \
+  uint16x8_t __s1_728 = __p1_728; \
+  uint8x8_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_728; \
+  __ret_728 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_728), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_728, __p2_728)))); \
+  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_728; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \
-  int16x4_t __s0_242 = __p0_242; \
-  int32x4_t __s1_242 = __p1_242; \
-  int16x8_t __ret_242; \
-  __ret_242 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_242), (int16x4_t)(vshrn_n_s32(__s1_242, __p2_242)))); \
-  __ret_242; \
+#define vshrn_high_n_s32(__p0_729, __p1_729, __p2_729) __extension__ ({ \
+  int16x4_t __s0_729 = __p0_729; \
+  int32x4_t __s1_729 = __p1_729; \
+  int16x8_t __ret_729; \
+  __ret_729 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_729), (int16x4_t)(vshrn_n_s32(__s1_729, __p2_729)))); \
+  __ret_729; \
 })
 #else
-#define vshrn_high_n_s32(__p0_243, __p1_243, __p2_243) __extension__ ({ \
-  int16x4_t __s0_243 = __p0_243; \
-  int32x4_t __s1_243 = __p1_243; \
-  int16x4_t __rev0_243;  __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
-  int32x4_t __rev1_243;  __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 3, 2, 1, 0); \
-  int16x8_t __ret_243; \
-  __ret_243 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_243), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_243, __p2_243)))); \
-  __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_243; \
+#define vshrn_high_n_s32(__p0_730, __p1_730, __p2_730) __extension__ ({ \
+  int16x4_t __s0_730 = __p0_730; \
+  int32x4_t __s1_730 = __p1_730; \
+  int16x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
+  int32x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
+  int16x8_t __ret_730; \
+  __ret_730 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_730), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_730, __p2_730)))); \
+  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_730; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_244, __p1_244, __p2_244) __extension__ ({ \
-  int32x2_t __s0_244 = __p0_244; \
-  int64x2_t __s1_244 = __p1_244; \
-  int32x4_t __ret_244; \
-  __ret_244 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_244), (int32x2_t)(vshrn_n_s64(__s1_244, __p2_244)))); \
-  __ret_244; \
+#define vshrn_high_n_s64(__p0_731, __p1_731, __p2_731) __extension__ ({ \
+  int32x2_t __s0_731 = __p0_731; \
+  int64x2_t __s1_731 = __p1_731; \
+  int32x4_t __ret_731; \
+  __ret_731 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_731), (int32x2_t)(vshrn_n_s64(__s1_731, __p2_731)))); \
+  __ret_731; \
 })
 #else
-#define vshrn_high_n_s64(__p0_245, __p1_245, __p2_245) __extension__ ({ \
-  int32x2_t __s0_245 = __p0_245; \
-  int64x2_t __s1_245 = __p1_245; \
-  int32x2_t __rev0_245;  __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \
-  int64x2_t __rev1_245;  __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 1, 0); \
-  int32x4_t __ret_245; \
-  __ret_245 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_245), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_245, __p2_245)))); \
-  __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
-  __ret_245; \
+#define vshrn_high_n_s64(__p0_732, __p1_732, __p2_732) __extension__ ({ \
+  int32x2_t __s0_732 = __p0_732; \
+  int64x2_t __s1_732 = __p1_732; \
+  int32x2_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \
+  int64x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
+  int32x4_t __ret_732; \
+  __ret_732 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_732), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_732, __p2_732)))); \
+  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 3, 2, 1, 0); \
+  __ret_732; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
-  int8x8_t __s0_246 = __p0_246; \
-  int16x8_t __s1_246 = __p1_246; \
-  int8x16_t __ret_246; \
-  __ret_246 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_246), (int8x8_t)(vshrn_n_s16(__s1_246, __p2_246)))); \
-  __ret_246; \
+#define vshrn_high_n_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
+  int8x8_t __s0_733 = __p0_733; \
+  int16x8_t __s1_733 = __p1_733; \
+  int8x16_t __ret_733; \
+  __ret_733 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_733), (int8x8_t)(vshrn_n_s16(__s1_733, __p2_733)))); \
+  __ret_733; \
 })
 #else
-#define vshrn_high_n_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
-  int8x8_t __s0_247 = __p0_247; \
-  int16x8_t __s1_247 = __p1_247; \
-  int8x8_t __rev0_247;  __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_247;  __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_247; \
-  __ret_247 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_247), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_247, __p2_247)))); \
-  __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_247; \
+#define vshrn_high_n_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
+  int8x8_t __s0_734 = __p0_734; \
+  int16x8_t __s1_734 = __p1_734; \
+  int8x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_734; \
+  __ret_734 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_734), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_734, __p2_734)))); \
+  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_734; \
 })
 #endif
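
vshrn_high_n_* is the simplest member of the family: a truncating shift right with no rounding and no saturation. One common use is extracting the integer part of fixed-point lanes, sketched here with illustrative names:

#include <arm_neon.h>

/* Keep the integer part of 8.8 fixed-point lanes: shift each 16-bit lane
 * right by 8 and pack the bytes into the high half of the result. */
static inline uint8x16_t fixed_point_floor_high(uint8x8_t lo, uint16x8_t wide) {
  return vshrn_high_n_u16(lo, wide, 8);
}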
 
@@ -57753,6 +61553,58 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vsudotq_laneq_s32(__p0_735, __p1_735, __p2_735, __p3_735) __extension__ ({ \
+  int32x4_t __s0_735 = __p0_735; \
+  int8x16_t __s1_735 = __p1_735; \
+  uint8x16_t __s2_735 = __p2_735; \
+  int32x4_t __ret_735; \
+uint8x16_t __reint_735 = __s2_735; \
+  __ret_735 = vusdotq_s32(__s0_735, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_735, __p3_735)), __s1_735); \
+  __ret_735; \
+})
+#else
+#define vsudotq_laneq_s32(__p0_736, __p1_736, __p2_736, __p3_736) __extension__ ({ \
+  int32x4_t __s0_736 = __p0_736; \
+  int8x16_t __s1_736 = __p1_736; \
+  uint8x16_t __s2_736 = __p2_736; \
+  int32x4_t __rev0_736;  __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 3, 2, 1, 0); \
+  int8x16_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_736;  __rev2_736 = __builtin_shufflevector(__s2_736, __s2_736, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_736; \
+uint8x16_t __reint_736 = __rev2_736; \
+  __ret_736 = __noswap_vusdotq_s32(__rev0_736, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_736, __p3_736)), __rev1_736); \
+  __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \
+  __ret_736; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsudot_laneq_s32(__p0_737, __p1_737, __p2_737, __p3_737) __extension__ ({ \
+  int32x2_t __s0_737 = __p0_737; \
+  int8x8_t __s1_737 = __p1_737; \
+  uint8x16_t __s2_737 = __p2_737; \
+  int32x2_t __ret_737; \
+uint8x16_t __reint_737 = __s2_737; \
+  __ret_737 = vusdot_s32(__s0_737, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_737, __p3_737)), __s1_737); \
+  __ret_737; \
+})
+#else
+#define vsudot_laneq_s32(__p0_738, __p1_738, __p2_738, __p3_738) __extension__ ({ \
+  int32x2_t __s0_738 = __p0_738; \
+  int8x8_t __s1_738 = __p1_738; \
+  uint8x16_t __s2_738 = __p2_738; \
+  int32x2_t __rev0_738;  __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 1, 0); \
+  int8x8_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_738;  __rev2_738 = __builtin_shufflevector(__s2_738, __s2_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_738; \
+uint8x16_t __reint_738 = __rev2_738; \
+  __ret_738 = __noswap_vusdot_s32(__rev0_738, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_738, __p3_738)), __rev1_738); \
+  __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 1, 0); \
+  __ret_738; \
+})
+#endif
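+
The vsudotq_laneq_s32 and vsudot_laneq_s32 additions above belong to the Armv8.6 int8 matrix-multiply extension: each 32-bit accumulator lane gains the dot product of four signed bytes of the second operand with four unsigned bytes broadcast from the 32-bit group of the third operand selected by the lane index. As the expansion shows, the header realizes sudot as usdot with the byte operands swapped. A usage sketch, assuming a compiler configured for the i8mm feature (for example something like -march=armv8.6-a+i8mm; the exact flag spelling may vary by toolchain):

#include <arm_neon.h>

/* Accumulate signed activations in 'a' against one group of four unsigned
 * weights taken from lane 0 of 'b'. */
static inline int32x4_t sudot_lane0(int32x4_t acc, int8x16_t a, uint8x16_t b) {
  return vsudotq_laneq_s32(acc, a, b, 0);
}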
+
+#ifdef __LITTLE_ENDIAN__
 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
   poly8x8_t __ret;
   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
@@ -58721,6 +62573,58 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vusdotq_laneq_s32(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \
+  int32x4_t __s0_739 = __p0_739; \
+  uint8x16_t __s1_739 = __p1_739; \
+  int8x16_t __s2_739 = __p2_739; \
+  int32x4_t __ret_739; \
+int8x16_t __reint_739 = __s2_739; \
+  __ret_739 = vusdotq_s32(__s0_739, __s1_739, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_739, __p3_739))); \
+  __ret_739; \
+})
+#else
+#define vusdotq_laneq_s32(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \
+  int32x4_t __s0_740 = __p0_740; \
+  uint8x16_t __s1_740 = __p1_740; \
+  int8x16_t __s2_740 = __p2_740; \
+  int32x4_t __rev0_740;  __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 3, 2, 1, 0); \
+  uint8x16_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_740;  __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_740; \
+int8x16_t __reint_740 = __rev2_740; \
+  __ret_740 = __noswap_vusdotq_s32(__rev0_740, __rev1_740, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_740, __p3_740))); \
+  __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 3, 2, 1, 0); \
+  __ret_740; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdot_laneq_s32(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \
+  int32x2_t __s0_741 = __p0_741; \
+  uint8x8_t __s1_741 = __p1_741; \
+  int8x16_t __s2_741 = __p2_741; \
+  int32x2_t __ret_741; \
+int8x16_t __reint_741 = __s2_741; \
+  __ret_741 = vusdot_s32(__s0_741, __s1_741, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_741, __p3_741))); \
+  __ret_741; \
+})
+#else
+#define vusdot_laneq_s32(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \
+  int32x2_t __s0_742 = __p0_742; \
+  uint8x8_t __s1_742 = __p1_742; \
+  int8x16_t __s2_742 = __p2_742; \
+  int32x2_t __rev0_742;  __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 1, 0); \
+  uint8x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_742;  __rev2_742 = __builtin_shufflevector(__s2_742, __s2_742, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_742; \
+int8x16_t __reint_742 = __rev2_742; \
+  __ret_742 = __noswap_vusdot_s32(__rev0_742, __rev1_742, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_742, __p3_742))); \
+  __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 1, 0); \
+  __ret_742; \
+})
+#endif
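+
vusdotq_laneq_s32 and vusdot_laneq_s32 are the mirror image: unsigned bytes in the second operand, signed bytes broadcast from a lane of the third. A sketch of the 64-bit form under the same i8mm assumption:

#include <arm_neon.h>

/* Two accumulator lanes, each gaining the dot product of four unsigned
 * bytes of 'a' with the signed weight group in lane 1 of 'b'. */
static inline int32x2_t usdot_lane1(int32x2_t acc, uint8x8_t a, int8x16_t b) {
  return vusdot_laneq_s32(acc, a, b, 1);
}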
+
+#ifdef __LITTLE_ENDIAN__
 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
   poly8x8_t __ret;
   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
@@ -60770,60 +64674,60 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_248, __p1_248) __extension__ ({ \
-  float16x4_t __s0_248 = __p0_248; \
-  float16_t __ret_248; \
-float16x4_t __reint_248 = __s0_248; \
-int16_t __reint1_248 = vget_lane_s16(*(int16x4_t *) &__reint_248, __p1_248); \
-  __ret_248 = *(float16_t *) &__reint1_248; \
-  __ret_248; \
+#define vget_lane_f16(__p0_743, __p1_743) __extension__ ({ \
+  float16x4_t __s0_743 = __p0_743; \
+  float16_t __ret_743; \
+float16x4_t __reint_743 = __s0_743; \
+int16_t __reint1_743 = vget_lane_s16(*(int16x4_t *) &__reint_743, __p1_743); \
+  __ret_743 = *(float16_t *) &__reint1_743; \
+  __ret_743; \
 })
 #else
-#define vget_lane_f16(__p0_249, __p1_249) __extension__ ({ \
-  float16x4_t __s0_249 = __p0_249; \
-  float16x4_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 3, 2, 1, 0); \
-  float16_t __ret_249; \
-float16x4_t __reint_249 = __rev0_249; \
-int16_t __reint1_249 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_249, __p1_249); \
-  __ret_249 = *(float16_t *) &__reint1_249; \
-  __ret_249; \
+#define vget_lane_f16(__p0_744, __p1_744) __extension__ ({ \
+  float16x4_t __s0_744 = __p0_744; \
+  float16x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
+  float16_t __ret_744; \
+float16x4_t __reint_744 = __rev0_744; \
+int16_t __reint1_744 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_744, __p1_744); \
+  __ret_744 = *(float16_t *) &__reint1_744; \
+  __ret_744; \
 })
-#define __noswap_vget_lane_f16(__p0_250, __p1_250) __extension__ ({ \
-  float16x4_t __s0_250 = __p0_250; \
-  float16_t __ret_250; \
-float16x4_t __reint_250 = __s0_250; \
-int16_t __reint1_250 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_250, __p1_250); \
-  __ret_250 = *(float16_t *) &__reint1_250; \
-  __ret_250; \
+#define __noswap_vget_lane_f16(__p0_745, __p1_745) __extension__ ({ \
+  float16x4_t __s0_745 = __p0_745; \
+  float16_t __ret_745; \
+float16x4_t __reint_745 = __s0_745; \
+int16_t __reint1_745 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_745, __p1_745); \
+  __ret_745 = *(float16_t *) &__reint1_745; \
+  __ret_745; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_251, __p1_251) __extension__ ({ \
-  float16x8_t __s0_251 = __p0_251; \
-  float16_t __ret_251; \
-float16x8_t __reint_251 = __s0_251; \
-int16_t __reint1_251 = vgetq_lane_s16(*(int16x8_t *) &__reint_251, __p1_251); \
-  __ret_251 = *(float16_t *) &__reint1_251; \
-  __ret_251; \
+#define vgetq_lane_f16(__p0_746, __p1_746) __extension__ ({ \
+  float16x8_t __s0_746 = __p0_746; \
+  float16_t __ret_746; \
+float16x8_t __reint_746 = __s0_746; \
+int16_t __reint1_746 = vgetq_lane_s16(*(int16x8_t *) &__reint_746, __p1_746); \
+  __ret_746 = *(float16_t *) &__reint1_746; \
+  __ret_746; \
 })
 #else
-#define vgetq_lane_f16(__p0_252, __p1_252) __extension__ ({ \
-  float16x8_t __s0_252 = __p0_252; \
-  float16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_252; \
-float16x8_t __reint_252 = __rev0_252; \
-int16_t __reint1_252 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_252, __p1_252); \
-  __ret_252 = *(float16_t *) &__reint1_252; \
-  __ret_252; \
+#define vgetq_lane_f16(__p0_747, __p1_747) __extension__ ({ \
+  float16x8_t __s0_747 = __p0_747; \
+  float16x8_t __rev0_747;  __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_747; \
+float16x8_t __reint_747 = __rev0_747; \
+int16_t __reint1_747 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_747, __p1_747); \
+  __ret_747 = *(float16_t *) &__reint1_747; \
+  __ret_747; \
 })
-#define __noswap_vgetq_lane_f16(__p0_253, __p1_253) __extension__ ({ \
-  float16x8_t __s0_253 = __p0_253; \
-  float16_t __ret_253; \
-float16x8_t __reint_253 = __s0_253; \
-int16_t __reint1_253 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_253, __p1_253); \
-  __ret_253 = *(float16_t *) &__reint1_253; \
-  __ret_253; \
+#define __noswap_vgetq_lane_f16(__p0_748, __p1_748) __extension__ ({ \
+  float16x8_t __s0_748 = __p0_748; \
+  float16_t __ret_748; \
+float16x8_t __reint_748 = __s0_748; \
+int16_t __reint1_748 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_748, __p1_748); \
+  __ret_748 = *(float16_t *) &__reint1_748; \
+  __ret_748; \
 })
 #endif
 
@@ -60966,98 +64870,98 @@
 #endif
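+/* The vmlal_lane and vmlsl_lane macros below are widening multiply-
+ * accumulate (respectively multiply-subtract) by lane; for vmlal_lane_s16,
+ * __ret[i] = __s0[i] + (int32_t)__s1[i] * (int32_t)__s2[__p3]. The lane is
+ * broadcast with the shared splat_lane helpers. */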
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_u32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \
+  uint64x2_t __s0_749 = __p0_749; \
+  uint32x2_t __s1_749 = __p1_749; \
+  uint32x2_t __s2_749 = __p2_749; \
+  uint64x2_t __ret_749; \
+  __ret_749 = __s0_749 + vmull_u32(__s1_749, splat_lane_u32(__s2_749, __p3_749)); \
+  __ret_749; \
 })
 #else
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_lane_u32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \
+  uint64x2_t __s0_750 = __p0_750; \
+  uint32x2_t __s1_750 = __p1_750; \
+  uint32x2_t __s2_750 = __p2_750; \
+  uint64x2_t __rev0_750;  __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 1, 0); \
+  uint32x2_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 1, 0); \
+  uint32x2_t __rev2_750;  __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 1, 0); \
+  uint64x2_t __ret_750; \
+  __ret_750 = __rev0_750 + __noswap_vmull_u32(__rev1_750, __noswap_splat_lane_u32(__rev2_750, __p3_750)); \
+  __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 1, 0); \
+  __ret_750; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_u16(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \
+  uint32x4_t __s0_751 = __p0_751; \
+  uint16x4_t __s1_751 = __p1_751; \
+  uint16x4_t __s2_751 = __p2_751; \
+  uint32x4_t __ret_751; \
+  __ret_751 = __s0_751 + vmull_u16(__s1_751, splat_lane_u16(__s2_751, __p3_751)); \
+  __ret_751; \
 })
 #else
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_lane_u16(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \
+  uint32x4_t __s0_752 = __p0_752; \
+  uint16x4_t __s1_752 = __p1_752; \
+  uint16x4_t __s2_752 = __p2_752; \
+  uint32x4_t __rev0_752;  __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 3, 2, 1, 0); \
+  uint16x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
+  uint16x4_t __rev2_752;  __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 3, 2, 1, 0); \
+  uint32x4_t __ret_752; \
+  __ret_752 = __rev0_752 + __noswap_vmull_u16(__rev1_752, __noswap_splat_lane_u16(__rev2_752, __p3_752)); \
+  __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 3, 2, 1, 0); \
+  __ret_752; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_s32(__p0_753, __p1_753, __p2_753, __p3_753) __extension__ ({ \
+  int64x2_t __s0_753 = __p0_753; \
+  int32x2_t __s1_753 = __p1_753; \
+  int32x2_t __s2_753 = __p2_753; \
+  int64x2_t __ret_753; \
+  __ret_753 = __s0_753 + vmull_s32(__s1_753, splat_lane_s32(__s2_753, __p3_753)); \
+  __ret_753; \
 })
 #else
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_lane_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \
+  int64x2_t __s0_754 = __p0_754; \
+  int32x2_t __s1_754 = __p1_754; \
+  int32x2_t __s2_754 = __p2_754; \
+  int64x2_t __rev0_754;  __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \
+  int32x2_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 1, 0); \
+  int32x2_t __rev2_754;  __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 1, 0); \
+  int64x2_t __ret_754; \
+  __ret_754 = __rev0_754 + __noswap_vmull_s32(__rev1_754, __noswap_splat_lane_s32(__rev2_754, __p3_754)); \
+  __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \
+  __ret_754; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_s16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \
+  int32x4_t __s0_755 = __p0_755; \
+  int16x4_t __s1_755 = __p1_755; \
+  int16x4_t __s2_755 = __p2_755; \
+  int32x4_t __ret_755; \
+  __ret_755 = __s0_755 + vmull_s16(__s1_755, splat_lane_s16(__s2_755, __p3_755)); \
+  __ret_755; \
 })
 #else
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_lane_s16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \
+  int32x4_t __s0_756 = __p0_756; \
+  int16x4_t __s1_756 = __p1_756; \
+  int16x4_t __s2_756 = __p2_756; \
+  int32x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
+  int16x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
+  int16x4_t __rev2_756;  __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \
+  int32x4_t __ret_756; \
+  __ret_756 = __rev0_756 + __noswap_vmull_s16(__rev1_756, __noswap_splat_lane_s16(__rev2_756, __p3_756)); \
+  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 3, 2, 1, 0); \
+  __ret_756; \
 })
 #endif
 
@@ -61288,98 +65192,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_u32(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \
+  uint64x2_t __s0_757 = __p0_757; \
+  uint32x2_t __s1_757 = __p1_757; \
+  uint32x2_t __s2_757 = __p2_757; \
+  uint64x2_t __ret_757; \
+  __ret_757 = __s0_757 - vmull_u32(__s1_757, splat_lane_u32(__s2_757, __p3_757)); \
+  __ret_757; \
 })
 #else
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_lane_u32(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \
+  uint64x2_t __s0_758 = __p0_758; \
+  uint32x2_t __s1_758 = __p1_758; \
+  uint32x2_t __s2_758 = __p2_758; \
+  uint64x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
+  uint32x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
+  uint32x2_t __rev2_758;  __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 1, 0); \
+  uint64x2_t __ret_758; \
+  __ret_758 = __rev0_758 - __noswap_vmull_u32(__rev1_758, __noswap_splat_lane_u32(__rev2_758, __p3_758)); \
+  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 1, 0); \
+  __ret_758; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_u16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \
+  uint32x4_t __s0_759 = __p0_759; \
+  uint16x4_t __s1_759 = __p1_759; \
+  uint16x4_t __s2_759 = __p2_759; \
+  uint32x4_t __ret_759; \
+  __ret_759 = __s0_759 - vmull_u16(__s1_759, splat_lane_u16(__s2_759, __p3_759)); \
+  __ret_759; \
 })
 #else
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_lane_u16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \
+  uint32x4_t __s0_760 = __p0_760; \
+  uint16x4_t __s1_760 = __p1_760; \
+  uint16x4_t __s2_760 = __p2_760; \
+  uint32x4_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \
+  uint16x4_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \
+  uint16x4_t __rev2_760;  __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \
+  uint32x4_t __ret_760; \
+  __ret_760 = __rev0_760 - __noswap_vmull_u16(__rev1_760, __noswap_splat_lane_u16(__rev2_760, __p3_760)); \
+  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \
+  __ret_760; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_s32(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \
+  int64x2_t __s0_761 = __p0_761; \
+  int32x2_t __s1_761 = __p1_761; \
+  int32x2_t __s2_761 = __p2_761; \
+  int64x2_t __ret_761; \
+  __ret_761 = __s0_761 - vmull_s32(__s1_761, splat_lane_s32(__s2_761, __p3_761)); \
+  __ret_761; \
 })
 #else
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_lane_s32(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \
+  int64x2_t __s0_762 = __p0_762; \
+  int32x2_t __s1_762 = __p1_762; \
+  int32x2_t __s2_762 = __p2_762; \
+  int64x2_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 1, 0); \
+  int32x2_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 1, 0); \
+  int32x2_t __rev2_762;  __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 1, 0); \
+  int64x2_t __ret_762; \
+  __ret_762 = __rev0_762 - __noswap_vmull_s32(__rev1_762, __noswap_splat_lane_s32(__rev2_762, __p3_762)); \
+  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 1, 0); \
+  __ret_762; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_s16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \
+  int32x4_t __s0_763 = __p0_763; \
+  int16x4_t __s1_763 = __p1_763; \
+  int16x4_t __s2_763 = __p2_763; \
+  int32x4_t __ret_763; \
+  __ret_763 = __s0_763 - vmull_s16(__s1_763, splat_lane_s16(__s2_763, __p3_763)); \
+  __ret_763; \
 })
 #else
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_lane_s16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \
+  int32x4_t __s0_764 = __p0_764; \
+  int16x4_t __s1_764 = __p1_764; \
+  int16x4_t __s2_764 = __p2_764; \
+  int32x4_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 3, 2, 1, 0); \
+  int16x4_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 3, 2, 1, 0); \
+  int16x4_t __rev2_764;  __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 3, 2, 1, 0); \
+  int32x4_t __ret_764; \
+  __ret_764 = __rev0_764 - __noswap_vmull_s16(__rev1_764, __noswap_splat_lane_s16(__rev2_764, __p3_764)); \
+  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
+  __ret_764; \
 })
 #endif
 
@@ -61472,479 +65376,663 @@
 #endif
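+/* vset_lane_f16 and vsetq_lane_f16 below insert a float16 lane through the
+ * same int16 bit-cast scheme used by vget_lane_f16 above. */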
 
 #ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_254, __p1_254, __p2_254) __extension__ ({ \
-  float16_t __s0_254 = __p0_254; \
-  float16x4_t __s1_254 = __p1_254; \
-  float16x4_t __ret_254; \
-float16_t __reint_254 = __s0_254; \
-float16x4_t __reint1_254 = __s1_254; \
-int16x4_t __reint2_254 = vset_lane_s16(*(int16_t *) &__reint_254, *(int16x4_t *) &__reint1_254, __p2_254); \
-  __ret_254 = *(float16x4_t *) &__reint2_254; \
-  __ret_254; \
+#define vset_lane_f16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
+  float16_t __s0_765 = __p0_765; \
+  float16x4_t __s1_765 = __p1_765; \
+  float16x4_t __ret_765; \
+float16_t __reint_765 = __s0_765; \
+float16x4_t __reint1_765 = __s1_765; \
+int16x4_t __reint2_765 = vset_lane_s16(*(int16_t *) &__reint_765, *(int16x4_t *) &__reint1_765, __p2_765); \
+  __ret_765 = *(float16x4_t *) &__reint2_765; \
+  __ret_765; \
 })
 #else
-#define vset_lane_f16(__p0_255, __p1_255, __p2_255) __extension__ ({ \
-  float16_t __s0_255 = __p0_255; \
-  float16x4_t __s1_255 = __p1_255; \
-  float16x4_t __rev1_255;  __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 3, 2, 1, 0); \
-  float16x4_t __ret_255; \
-float16_t __reint_255 = __s0_255; \
-float16x4_t __reint1_255 = __rev1_255; \
-int16x4_t __reint2_255 = __noswap_vset_lane_s16(*(int16_t *) &__reint_255, *(int16x4_t *) &__reint1_255, __p2_255); \
-  __ret_255 = *(float16x4_t *) &__reint2_255; \
-  __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 3, 2, 1, 0); \
-  __ret_255; \
+#define vset_lane_f16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
+  float16_t __s0_766 = __p0_766; \
+  float16x4_t __s1_766 = __p1_766; \
+  float16x4_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \
+  float16x4_t __ret_766; \
+float16_t __reint_766 = __s0_766; \
+float16x4_t __reint1_766 = __rev1_766; \
+int16x4_t __reint2_766 = __noswap_vset_lane_s16(*(int16_t *) &__reint_766, *(int16x4_t *) &__reint1_766, __p2_766); \
+  __ret_766 = *(float16x4_t *) &__reint2_766; \
+  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \
+  __ret_766; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_256, __p1_256, __p2_256) __extension__ ({ \
-  float16_t __s0_256 = __p0_256; \
-  float16x8_t __s1_256 = __p1_256; \
-  float16x8_t __ret_256; \
-float16_t __reint_256 = __s0_256; \
-float16x8_t __reint1_256 = __s1_256; \
-int16x8_t __reint2_256 = vsetq_lane_s16(*(int16_t *) &__reint_256, *(int16x8_t *) &__reint1_256, __p2_256); \
-  __ret_256 = *(float16x8_t *) &__reint2_256; \
-  __ret_256; \
+#define vsetq_lane_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \
+  float16_t __s0_767 = __p0_767; \
+  float16x8_t __s1_767 = __p1_767; \
+  float16x8_t __ret_767; \
+float16_t __reint_767 = __s0_767; \
+float16x8_t __reint1_767 = __s1_767; \
+int16x8_t __reint2_767 = vsetq_lane_s16(*(int16_t *) &__reint_767, *(int16x8_t *) &__reint1_767, __p2_767); \
+  __ret_767 = *(float16x8_t *) &__reint2_767; \
+  __ret_767; \
 })
 #else
-#define vsetq_lane_f16(__p0_257, __p1_257, __p2_257) __extension__ ({ \
-  float16_t __s0_257 = __p0_257; \
-  float16x8_t __s1_257 = __p1_257; \
-  float16x8_t __rev1_257;  __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_257; \
-float16_t __reint_257 = __s0_257; \
-float16x8_t __reint1_257 = __rev1_257; \
-int16x8_t __reint2_257 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_257, *(int16x8_t *) &__reint1_257, __p2_257); \
-  __ret_257 = *(float16x8_t *) &__reint2_257; \
-  __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_257; \
+#define vsetq_lane_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \
+  float16_t __s0_768 = __p0_768; \
+  float16x8_t __s1_768 = __p1_768; \
+  float16x8_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_768; \
+float16_t __reint_768 = __s0_768; \
+float16x8_t __reint1_768 = __rev1_768; \
+int16x8_t __reint2_768 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_768, *(int16x8_t *) &__reint1_768, __p2_768); \
+  __ret_768 = *(float16x8_t *) &__reint2_768; \
+  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_768; \
 })
 #endif
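+/* BFloat16 lane forms below (guarded by __ARM_FEATURE_BF16_VECTOR_ARITHMETIC):
+ * vbfmlalbq_lane_f32/vbfmlaltq_lane_f32 and their _laneq variants broadcast
+ * bfloat16 lane __p3 of __p2 and accumulate its products with the even- (b)
+ * or odd- (t) numbered elements of __p1 into a float32x4_t accumulator
+ * (BFMLALB/BFMLALT). */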
 
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlalbq_lane_f32(__p0_769, __p1_769, __p2_769, __p3_769) __extension__ ({ \
+  float32x4_t __s0_769 = __p0_769; \
+  bfloat16x8_t __s1_769 = __p1_769; \
+  bfloat16x4_t __s2_769 = __p2_769; \
+  float32x4_t __ret_769; \
+  __ret_769 = vbfmlalbq_f32(__s0_769, __s1_769, (bfloat16x8_t) {vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769)}); \
+  __ret_769; \
+})
+#else
+#define vbfmlalbq_lane_f32(__p0_770, __p1_770, __p2_770, __p3_770) __extension__ ({ \
+  float32x4_t __s0_770 = __p0_770; \
+  bfloat16x8_t __s1_770 = __p1_770; \
+  bfloat16x4_t __s2_770 = __p2_770; \
+  float32x4_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_770;  __rev2_770 = __builtin_shufflevector(__s2_770, __s2_770, 3, 2, 1, 0); \
+  float32x4_t __ret_770; \
+  __ret_770 = __noswap_vbfmlalbq_f32(__rev0_770, __rev1_770, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770)}); \
+  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
+  __ret_770; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlalbq_laneq_f32(__p0_771, __p1_771, __p2_771, __p3_771) __extension__ ({ \
+  float32x4_t __s0_771 = __p0_771; \
+  bfloat16x8_t __s1_771 = __p1_771; \
+  bfloat16x8_t __s2_771 = __p2_771; \
+  float32x4_t __ret_771; \
+  __ret_771 = vbfmlalbq_f32(__s0_771, __s1_771, (bfloat16x8_t) {vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771)}); \
+  __ret_771; \
+})
+#else
+#define vbfmlalbq_laneq_f32(__p0_772, __p1_772, __p2_772, __p3_772) __extension__ ({ \
+  float32x4_t __s0_772 = __p0_772; \
+  bfloat16x8_t __s1_772 = __p1_772; \
+  bfloat16x8_t __s2_772 = __p2_772; \
+  float32x4_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_772;  __rev2_772 = __builtin_shufflevector(__s2_772, __s2_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_772; \
+  __ret_772 = __noswap_vbfmlalbq_f32(__rev0_772, __rev1_772, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772)}); \
+  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 3, 2, 1, 0); \
+  __ret_772; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlaltq_lane_f32(__p0_773, __p1_773, __p2_773, __p3_773) __extension__ ({ \
+  float32x4_t __s0_773 = __p0_773; \
+  bfloat16x8_t __s1_773 = __p1_773; \
+  bfloat16x4_t __s2_773 = __p2_773; \
+  float32x4_t __ret_773; \
+  __ret_773 = vbfmlaltq_f32(__s0_773, __s1_773, (bfloat16x8_t) {vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773)}); \
+  __ret_773; \
+})
+#else
+#define vbfmlaltq_lane_f32(__p0_774, __p1_774, __p2_774, __p3_774) __extension__ ({ \
+  float32x4_t __s0_774 = __p0_774; \
+  bfloat16x8_t __s1_774 = __p1_774; \
+  bfloat16x4_t __s2_774 = __p2_774; \
+  float32x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_774;  __rev2_774 = __builtin_shufflevector(__s2_774, __s2_774, 3, 2, 1, 0); \
+  float32x4_t __ret_774; \
+  __ret_774 = __noswap_vbfmlaltq_f32(__rev0_774, __rev1_774, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774)}); \
+  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \
+  __ret_774; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlaltq_laneq_f32(__p0_775, __p1_775, __p2_775, __p3_775) __extension__ ({ \
+  float32x4_t __s0_775 = __p0_775; \
+  bfloat16x8_t __s1_775 = __p1_775; \
+  bfloat16x8_t __s2_775 = __p2_775; \
+  float32x4_t __ret_775; \
+  __ret_775 = vbfmlaltq_f32(__s0_775, __s1_775, (bfloat16x8_t) {vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775)}); \
+  __ret_775; \
+})
+#else
+#define vbfmlaltq_laneq_f32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \
+  float32x4_t __s0_776 = __p0_776; \
+  bfloat16x8_t __s1_776 = __p1_776; \
+  bfloat16x8_t __s2_776 = __p2_776; \
+  float32x4_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_776;  __rev2_776 = __builtin_shufflevector(__s2_776, __s2_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_776; \
+  __ret_776 = __noswap_vbfmlaltq_f32(__rev0_776, __rev1_776, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776)}); \
+  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
+  __ret_776; \
+})
+#endif
+
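+/* vcvtq_high_f32_bf16 and vcvtq_low_f32_bf16 widen the high or low four
+ * bfloat16 lanes of a bfloat16x8_t to float32x4_t via vcvt_f32_bf16. */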
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_bf16(vget_high_bf16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_bf16(vget_low_bf16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
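+/* The vfmlal/vfmlsl lane macros in the __ARM_FEATURE_FP16FML section below
+ * are half-precision widening multiply-accumulate (FMLAL) and multiply-
+ * subtract (FMLSL) into float32, taking the multiplier from lane __p3 by
+ * replicating vget_lane_f16/vgetq_lane_f16 into a float16 vector; _high and
+ * _low select which half of the float16 sources contributes. */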
 #if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float32x4_t __s0_258 = __p0_258; \
-  float16x8_t __s1_258 = __p1_258; \
-  float16x4_t __s2_258 = __p2_258; \
-  float32x4_t __ret_258; \
-  __ret_258 = vfmlalq_high_f16(__s0_258, __s1_258, (float16x8_t) {vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258)}); \
-  __ret_258; \
+#define vfmlalq_lane_high_f16(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \
+  float32x4_t __s0_777 = __p0_777; \
+  float16x8_t __s1_777 = __p1_777; \
+  float16x4_t __s2_777 = __p2_777; \
+  float32x4_t __ret_777; \
+  __ret_777 = vfmlalq_high_f16(__s0_777, __s1_777, (float16x8_t) {vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777)}); \
+  __ret_777; \
 })
 #else
-#define vfmlalq_lane_high_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float32x4_t __s0_259 = __p0_259; \
-  float16x8_t __s1_259 = __p1_259; \
-  float16x4_t __s2_259 = __p2_259; \
-  float32x4_t __rev0_259;  __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 3, 2, 1, 0); \
-  float16x8_t __rev1_259;  __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_259;  __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 3, 2, 1, 0); \
-  float32x4_t __ret_259; \
-  __ret_259 = __noswap_vfmlalq_high_f16(__rev0_259, __rev1_259, (float16x8_t) {__noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259)}); \
-  __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 3, 2, 1, 0); \
-  __ret_259; \
+#define vfmlalq_lane_high_f16(__p0_778, __p1_778, __p2_778, __p3_778) __extension__ ({ \
+  float32x4_t __s0_778 = __p0_778; \
+  float16x8_t __s1_778 = __p1_778; \
+  float16x4_t __s2_778 = __p2_778; \
+  float32x4_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \
+  float16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_778;  __rev2_778 = __builtin_shufflevector(__s2_778, __s2_778, 3, 2, 1, 0); \
+  float32x4_t __ret_778; \
+  __ret_778 = __noswap_vfmlalq_high_f16(__rev0_778, __rev1_778, (float16x8_t) {__noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778)}); \
+  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \
+  __ret_778; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float32x2_t __s0_260 = __p0_260; \
-  float16x4_t __s1_260 = __p1_260; \
-  float16x4_t __s2_260 = __p2_260; \
-  float32x2_t __ret_260; \
-  __ret_260 = vfmlal_high_f16(__s0_260, __s1_260, (float16x4_t) {vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260)}); \
-  __ret_260; \
+#define vfmlal_lane_high_f16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \
+  float32x2_t __s0_779 = __p0_779; \
+  float16x4_t __s1_779 = __p1_779; \
+  float16x4_t __s2_779 = __p2_779; \
+  float32x2_t __ret_779; \
+  __ret_779 = vfmlal_high_f16(__s0_779, __s1_779, (float16x4_t) {vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779)}); \
+  __ret_779; \
 })
 #else
-#define vfmlal_lane_high_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  float32x2_t __s0_261 = __p0_261; \
-  float16x4_t __s1_261 = __p1_261; \
-  float16x4_t __s2_261 = __p2_261; \
-  float32x2_t __rev0_261;  __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 1, 0); \
-  float16x4_t __rev1_261;  __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 3, 2, 1, 0); \
-  float16x4_t __rev2_261;  __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 3, 2, 1, 0); \
-  float32x2_t __ret_261; \
-  __ret_261 = __noswap_vfmlal_high_f16(__rev0_261, __rev1_261, (float16x4_t) {__noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261)}); \
-  __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 1, 0); \
-  __ret_261; \
+#define vfmlal_lane_high_f16(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \
+  float32x2_t __s0_780 = __p0_780; \
+  float16x4_t __s1_780 = __p1_780; \
+  float16x4_t __s2_780 = __p2_780; \
+  float32x2_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \
+  float16x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
+  float16x4_t __rev2_780;  __rev2_780 = __builtin_shufflevector(__s2_780, __s2_780, 3, 2, 1, 0); \
+  float32x2_t __ret_780; \
+  __ret_780 = __noswap_vfmlal_high_f16(__rev0_780, __rev1_780, (float16x4_t) {__noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780)}); \
+  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 1, 0); \
+  __ret_780; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  float32x4_t __s0_262 = __p0_262; \
-  float16x8_t __s1_262 = __p1_262; \
-  float16x4_t __s2_262 = __p2_262; \
-  float32x4_t __ret_262; \
-  __ret_262 = vfmlalq_low_f16(__s0_262, __s1_262, (float16x8_t) {vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262)}); \
-  __ret_262; \
+#define vfmlalq_lane_low_f16(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \
+  float32x4_t __s0_781 = __p0_781; \
+  float16x8_t __s1_781 = __p1_781; \
+  float16x4_t __s2_781 = __p2_781; \
+  float32x4_t __ret_781; \
+  __ret_781 = vfmlalq_low_f16(__s0_781, __s1_781, (float16x8_t) {vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781)}); \
+  __ret_781; \
 })
 #else
-#define vfmlalq_lane_low_f16(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  float32x4_t __s0_263 = __p0_263; \
-  float16x8_t __s1_263 = __p1_263; \
-  float16x4_t __s2_263 = __p2_263; \
-  float32x4_t __rev0_263;  __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 3, 2, 1, 0); \
-  float16x8_t __rev1_263;  __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_263;  __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 3, 2, 1, 0); \
-  float32x4_t __ret_263; \
-  __ret_263 = __noswap_vfmlalq_low_f16(__rev0_263, __rev1_263, (float16x8_t) {__noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263)}); \
-  __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 3, 2, 1, 0); \
-  __ret_263; \
+#define vfmlalq_lane_low_f16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \
+  float32x4_t __s0_782 = __p0_782; \
+  float16x8_t __s1_782 = __p1_782; \
+  float16x4_t __s2_782 = __p2_782; \
+  float32x4_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 3, 2, 1, 0); \
+  float16x8_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_782;  __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 3, 2, 1, 0); \
+  float32x4_t __ret_782; \
+  __ret_782 = __noswap_vfmlalq_low_f16(__rev0_782, __rev1_782, (float16x8_t) {__noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782)}); \
+  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
+  __ret_782; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  float32x2_t __s0_264 = __p0_264; \
-  float16x4_t __s1_264 = __p1_264; \
-  float16x4_t __s2_264 = __p2_264; \
-  float32x2_t __ret_264; \
-  __ret_264 = vfmlal_low_f16(__s0_264, __s1_264, (float16x4_t) {vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264)}); \
-  __ret_264; \
+#define vfmlal_lane_low_f16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \
+  float32x2_t __s0_783 = __p0_783; \
+  float16x4_t __s1_783 = __p1_783; \
+  float16x4_t __s2_783 = __p2_783; \
+  float32x2_t __ret_783; \
+  __ret_783 = vfmlal_low_f16(__s0_783, __s1_783, (float16x4_t) {vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783)}); \
+  __ret_783; \
 })
 #else
-#define vfmlal_lane_low_f16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  float32x2_t __s0_265 = __p0_265; \
-  float16x4_t __s1_265 = __p1_265; \
-  float16x4_t __s2_265 = __p2_265; \
-  float32x2_t __rev0_265;  __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 1, 0); \
-  float16x4_t __rev1_265;  __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \
-  float16x4_t __rev2_265;  __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 3, 2, 1, 0); \
-  float32x2_t __ret_265; \
-  __ret_265 = __noswap_vfmlal_low_f16(__rev0_265, __rev1_265, (float16x4_t) {__noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265)}); \
-  __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 1, 0); \
-  __ret_265; \
+#define vfmlal_lane_low_f16(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \
+  float32x2_t __s0_784 = __p0_784; \
+  float16x4_t __s1_784 = __p1_784; \
+  float16x4_t __s2_784 = __p2_784; \
+  float32x2_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 1, 0); \
+  float16x4_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \
+  float16x4_t __rev2_784;  __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 3, 2, 1, 0); \
+  float32x2_t __ret_784; \
+  __ret_784 = __noswap_vfmlal_low_f16(__rev0_784, __rev1_784, (float16x4_t) {__noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784)}); \
+  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 1, 0); \
+  __ret_784; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  float32x4_t __s0_266 = __p0_266; \
-  float16x8_t __s1_266 = __p1_266; \
-  float16x8_t __s2_266 = __p2_266; \
-  float32x4_t __ret_266; \
-  __ret_266 = vfmlalq_high_f16(__s0_266, __s1_266, (float16x8_t) {vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266)}); \
-  __ret_266; \
+#define vfmlalq_laneq_high_f16(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \
+  float32x4_t __s0_785 = __p0_785; \
+  float16x8_t __s1_785 = __p1_785; \
+  float16x8_t __s2_785 = __p2_785; \
+  float32x4_t __ret_785; \
+  __ret_785 = vfmlalq_high_f16(__s0_785, __s1_785, (float16x8_t) {vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785)}); \
+  __ret_785; \
 })
 #else
-#define vfmlalq_laneq_high_f16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  float32x4_t __s0_267 = __p0_267; \
-  float16x8_t __s1_267 = __p1_267; \
-  float16x8_t __s2_267 = __p2_267; \
-  float32x4_t __rev0_267;  __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 3, 2, 1, 0); \
-  float16x8_t __rev1_267;  __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_267;  __rev2_267 = __builtin_shufflevector(__s2_267, __s2_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_267; \
-  __ret_267 = __noswap_vfmlalq_high_f16(__rev0_267, __rev1_267, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267)}); \
-  __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 3, 2, 1, 0); \
-  __ret_267; \
+#define vfmlalq_laneq_high_f16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \
+  float32x4_t __s0_786 = __p0_786; \
+  float16x8_t __s1_786 = __p1_786; \
+  float16x8_t __s2_786 = __p2_786; \
+  float32x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
+  float16x8_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_786;  __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_786; \
+  __ret_786 = __noswap_vfmlalq_high_f16(__rev0_786, __rev1_786, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786)}); \
+  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \
+  __ret_786; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  float32x2_t __s0_268 = __p0_268; \
-  float16x4_t __s1_268 = __p1_268; \
-  float16x8_t __s2_268 = __p2_268; \
-  float32x2_t __ret_268; \
-  __ret_268 = vfmlal_high_f16(__s0_268, __s1_268, (float16x4_t) {vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268)}); \
-  __ret_268; \
+#define vfmlal_laneq_high_f16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \
+  float32x2_t __s0_787 = __p0_787; \
+  float16x4_t __s1_787 = __p1_787; \
+  float16x8_t __s2_787 = __p2_787; \
+  float32x2_t __ret_787; \
+  __ret_787 = vfmlal_high_f16(__s0_787, __s1_787, (float16x4_t) {vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787)}); \
+  __ret_787; \
 })
 #else
-#define vfmlal_laneq_high_f16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  float32x2_t __s0_269 = __p0_269; \
-  float16x4_t __s1_269 = __p1_269; \
-  float16x8_t __s2_269 = __p2_269; \
-  float32x2_t __rev0_269;  __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 1, 0); \
-  float16x4_t __rev1_269;  __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 3, 2, 1, 0); \
-  float16x8_t __rev2_269;  __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_269; \
-  __ret_269 = __noswap_vfmlal_high_f16(__rev0_269, __rev1_269, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269)}); \
-  __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 1, 0); \
-  __ret_269; \
+#define vfmlal_laneq_high_f16(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \
+  float32x2_t __s0_788 = __p0_788; \
+  float16x4_t __s1_788 = __p1_788; \
+  float16x8_t __s2_788 = __p2_788; \
+  float32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
+  float16x4_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 3, 2, 1, 0); \
+  float16x8_t __rev2_788;  __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_788; \
+  __ret_788 = __noswap_vfmlal_high_f16(__rev0_788, __rev1_788, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788)}); \
+  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 1, 0); \
+  __ret_788; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  float32x4_t __s0_270 = __p0_270; \
-  float16x8_t __s1_270 = __p1_270; \
-  float16x8_t __s2_270 = __p2_270; \
-  float32x4_t __ret_270; \
-  __ret_270 = vfmlalq_low_f16(__s0_270, __s1_270, (float16x8_t) {vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270)}); \
-  __ret_270; \
+#define vfmlalq_laneq_low_f16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \
+  float32x4_t __s0_789 = __p0_789; \
+  float16x8_t __s1_789 = __p1_789; \
+  float16x8_t __s2_789 = __p2_789; \
+  float32x4_t __ret_789; \
+  __ret_789 = vfmlalq_low_f16(__s0_789, __s1_789, (float16x8_t) {vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789)}); \
+  __ret_789; \
 })
 #else
-#define vfmlalq_laneq_low_f16(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  float32x4_t __s0_271 = __p0_271; \
-  float16x8_t __s1_271 = __p1_271; \
-  float16x8_t __s2_271 = __p2_271; \
-  float32x4_t __rev0_271;  __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \
-  float16x8_t __rev1_271;  __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_271;  __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_271; \
-  __ret_271 = __noswap_vfmlalq_low_f16(__rev0_271, __rev1_271, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271)}); \
-  __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \
-  __ret_271; \
+#define vfmlalq_laneq_low_f16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \
+  float32x4_t __s0_790 = __p0_790; \
+  float16x8_t __s1_790 = __p1_790; \
+  float16x8_t __s2_790 = __p2_790; \
+  float32x4_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \
+  float16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_790;  __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_790; \
+  __ret_790 = __noswap_vfmlalq_low_f16(__rev0_790, __rev1_790, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790)}); \
+  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 3, 2, 1, 0); \
+  __ret_790; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  float32x2_t __s0_272 = __p0_272; \
-  float16x4_t __s1_272 = __p1_272; \
-  float16x8_t __s2_272 = __p2_272; \
-  float32x2_t __ret_272; \
-  __ret_272 = vfmlal_low_f16(__s0_272, __s1_272, (float16x4_t) {vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272)}); \
-  __ret_272; \
+#define vfmlal_laneq_low_f16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \
+  float32x2_t __s0_791 = __p0_791; \
+  float16x4_t __s1_791 = __p1_791; \
+  float16x8_t __s2_791 = __p2_791; \
+  float32x2_t __ret_791; \
+  __ret_791 = vfmlal_low_f16(__s0_791, __s1_791, (float16x4_t) {vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791)}); \
+  __ret_791; \
 })
 #else
-#define vfmlal_laneq_low_f16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  float32x2_t __s0_273 = __p0_273; \
-  float16x4_t __s1_273 = __p1_273; \
-  float16x8_t __s2_273 = __p2_273; \
-  float32x2_t __rev0_273;  __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \
-  float16x4_t __rev1_273;  __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 3, 2, 1, 0); \
-  float16x8_t __rev2_273;  __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_273; \
-  __ret_273 = __noswap_vfmlal_low_f16(__rev0_273, __rev1_273, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273)}); \
-  __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \
-  __ret_273; \
+#define vfmlal_laneq_low_f16(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \
+  float32x2_t __s0_792 = __p0_792; \
+  float16x4_t __s1_792 = __p1_792; \
+  float16x8_t __s2_792 = __p2_792; \
+  float32x2_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \
+  float16x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
+  float16x8_t __rev2_792;  __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_792; \
+  __ret_792 = __noswap_vfmlal_low_f16(__rev0_792, __rev1_792, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792)}); \
+  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 1, 0); \
+  __ret_792; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  float32x4_t __s0_274 = __p0_274; \
-  float16x8_t __s1_274 = __p1_274; \
-  float16x4_t __s2_274 = __p2_274; \
-  float32x4_t __ret_274; \
-  __ret_274 = vfmlslq_high_f16(__s0_274, __s1_274, (float16x8_t) {vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274)}); \
-  __ret_274; \
+#define vfmlslq_lane_high_f16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \
+  float32x4_t __s0_793 = __p0_793; \
+  float16x8_t __s1_793 = __p1_793; \
+  float16x4_t __s2_793 = __p2_793; \
+  float32x4_t __ret_793; \
+  __ret_793 = vfmlslq_high_f16(__s0_793, __s1_793, (float16x8_t) {vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793)}); \
+  __ret_793; \
 })
 #else
-#define vfmlslq_lane_high_f16(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  float32x4_t __s0_275 = __p0_275; \
-  float16x8_t __s1_275 = __p1_275; \
-  float16x4_t __s2_275 = __p2_275; \
-  float32x4_t __rev0_275;  __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \
-  float16x8_t __rev1_275;  __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_275;  __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 3, 2, 1, 0); \
-  float32x4_t __ret_275; \
-  __ret_275 = __noswap_vfmlslq_high_f16(__rev0_275, __rev1_275, (float16x8_t) {__noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275)}); \
-  __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \
-  __ret_275; \
+#define vfmlslq_lane_high_f16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \
+  float32x4_t __s0_794 = __p0_794; \
+  float16x8_t __s1_794 = __p1_794; \
+  float16x4_t __s2_794 = __p2_794; \
+  float32x4_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 3, 2, 1, 0); \
+  float16x8_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_794;  __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 3, 2, 1, 0); \
+  float32x4_t __ret_794; \
+  __ret_794 = __noswap_vfmlslq_high_f16(__rev0_794, __rev1_794, (float16x8_t) {__noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794)}); \
+  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
+  __ret_794; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  float32x2_t __s0_276 = __p0_276; \
-  float16x4_t __s1_276 = __p1_276; \
-  float16x4_t __s2_276 = __p2_276; \
-  float32x2_t __ret_276; \
-  __ret_276 = vfmlsl_high_f16(__s0_276, __s1_276, (float16x4_t) {vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276)}); \
-  __ret_276; \
+#define vfmlsl_lane_high_f16(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \
+  float32x2_t __s0_795 = __p0_795; \
+  float16x4_t __s1_795 = __p1_795; \
+  float16x4_t __s2_795 = __p2_795; \
+  float32x2_t __ret_795; \
+  __ret_795 = vfmlsl_high_f16(__s0_795, __s1_795, (float16x4_t) {vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795)}); \
+  __ret_795; \
 })
 #else
-#define vfmlsl_lane_high_f16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
-  float32x2_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x4_t __s2_277 = __p2_277; \
-  float32x2_t __rev0_277;  __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \
-  float16x4_t __rev1_277;  __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 3, 2, 1, 0); \
-  float16x4_t __rev2_277;  __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 3, 2, 1, 0); \
-  float32x2_t __ret_277; \
-  __ret_277 = __noswap_vfmlsl_high_f16(__rev0_277, __rev1_277, (float16x4_t) {__noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277)}); \
-  __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \
-  __ret_277; \
+#define vfmlsl_lane_high_f16(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \
+  float32x2_t __s0_796 = __p0_796; \
+  float16x4_t __s1_796 = __p1_796; \
+  float16x4_t __s2_796 = __p2_796; \
+  float32x2_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 1, 0); \
+  float16x4_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \
+  float16x4_t __rev2_796;  __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \
+  float32x2_t __ret_796; \
+  __ret_796 = __noswap_vfmlsl_high_f16(__rev0_796, __rev1_796, (float16x4_t) {__noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796)}); \
+  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 1, 0); \
+  __ret_796; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
-  float32x4_t __s0_278 = __p0_278; \
-  float16x8_t __s1_278 = __p1_278; \
-  float16x4_t __s2_278 = __p2_278; \
-  float32x4_t __ret_278; \
-  __ret_278 = vfmlslq_low_f16(__s0_278, __s1_278, (float16x8_t) {vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278)}); \
-  __ret_278; \
+#define vfmlslq_lane_low_f16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \
+  float32x4_t __s0_797 = __p0_797; \
+  float16x8_t __s1_797 = __p1_797; \
+  float16x4_t __s2_797 = __p2_797; \
+  float32x4_t __ret_797; \
+  __ret_797 = vfmlslq_low_f16(__s0_797, __s1_797, (float16x8_t) {vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797)}); \
+  __ret_797; \
 })
 #else
-#define vfmlslq_lane_low_f16(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
-  float32x4_t __s0_279 = __p0_279; \
-  float16x8_t __s1_279 = __p1_279; \
-  float16x4_t __s2_279 = __p2_279; \
-  float32x4_t __rev0_279;  __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 3, 2, 1, 0); \
-  float16x8_t __rev1_279;  __rev1_279 = __builtin_shufflevector(__s1_279, __s1_279, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_279;  __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 3, 2, 1, 0); \
-  float32x4_t __ret_279; \
-  __ret_279 = __noswap_vfmlslq_low_f16(__rev0_279, __rev1_279, (float16x8_t) {__noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279)}); \
-  __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 3, 2, 1, 0); \
-  __ret_279; \
+#define vfmlslq_lane_low_f16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \
+  float32x4_t __s0_798 = __p0_798; \
+  float16x8_t __s1_798 = __p1_798; \
+  float16x4_t __s2_798 = __p2_798; \
+  float32x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
+  float16x8_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_798;  __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 3, 2, 1, 0); \
+  float32x4_t __ret_798; \
+  __ret_798 = __noswap_vfmlslq_low_f16(__rev0_798, __rev1_798, (float16x8_t) {__noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798)}); \
+  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \
+  __ret_798; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
-  float32x2_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __s2_280 = __p2_280; \
-  float32x2_t __ret_280; \
-  __ret_280 = vfmlsl_low_f16(__s0_280, __s1_280, (float16x4_t) {vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280)}); \
-  __ret_280; \
+#define vfmlsl_lane_low_f16(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \
+  float32x2_t __s0_799 = __p0_799; \
+  float16x4_t __s1_799 = __p1_799; \
+  float16x4_t __s2_799 = __p2_799; \
+  float32x2_t __ret_799; \
+  __ret_799 = vfmlsl_low_f16(__s0_799, __s1_799, (float16x4_t) {vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799)}); \
+  __ret_799; \
 })
 #else
-#define vfmlsl_lane_low_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float32x2_t __s0_281 = __p0_281; \
-  float16x4_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float32x2_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 1, 0); \
-  float16x4_t __rev1_281;  __rev1_281 = __builtin_shufflevector(__s1_281, __s1_281, 3, 2, 1, 0); \
-  float16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
-  float32x2_t __ret_281; \
-  __ret_281 = __noswap_vfmlsl_low_f16(__rev0_281, __rev1_281, (float16x4_t) {__noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281)}); \
-  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 1, 0); \
-  __ret_281; \
+#define vfmlsl_lane_low_f16(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \
+  float32x2_t __s0_800 = __p0_800; \
+  float16x4_t __s1_800 = __p1_800; \
+  float16x4_t __s2_800 = __p2_800; \
+  float32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
+  float16x4_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 3, 2, 1, 0); \
+  float16x4_t __rev2_800;  __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \
+  float32x2_t __ret_800; \
+  __ret_800 = __noswap_vfmlsl_low_f16(__rev0_800, __rev1_800, (float16x4_t) {__noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800)}); \
+  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \
+  __ret_800; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float32x4_t __s0_282 = __p0_282; \
-  float16x8_t __s1_282 = __p1_282; \
-  float16x8_t __s2_282 = __p2_282; \
-  float32x4_t __ret_282; \
-  __ret_282 = vfmlslq_high_f16(__s0_282, __s1_282, (float16x8_t) {vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282)}); \
-  __ret_282; \
+#define vfmlslq_laneq_high_f16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \
+  float32x4_t __s0_801 = __p0_801; \
+  float16x8_t __s1_801 = __p1_801; \
+  float16x8_t __s2_801 = __p2_801; \
+  float32x4_t __ret_801; \
+  __ret_801 = vfmlslq_high_f16(__s0_801, __s1_801, (float16x8_t) {vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801)}); \
+  __ret_801; \
 })
 #else
-#define vfmlslq_laneq_high_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float32x4_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x8_t __s2_283 = __p2_283; \
-  float32x4_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 3, 2, 1, 0); \
-  float16x8_t __rev1_283;  __rev1_283 = __builtin_shufflevector(__s1_283, __s1_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_283; \
-  __ret_283 = __noswap_vfmlslq_high_f16(__rev0_283, __rev1_283, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283)}); \
-  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 3, 2, 1, 0); \
-  __ret_283; \
+#define vfmlslq_laneq_high_f16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \
+  float32x4_t __s0_802 = __p0_802; \
+  float16x8_t __s1_802 = __p1_802; \
+  float16x8_t __s2_802 = __p2_802; \
+  float32x4_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \
+  float16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_802;  __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_802; \
+  __ret_802 = __noswap_vfmlslq_high_f16(__rev0_802, __rev1_802, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802)}); \
+  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \
+  __ret_802; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float32x2_t __s0_284 = __p0_284; \
-  float16x4_t __s1_284 = __p1_284; \
-  float16x8_t __s2_284 = __p2_284; \
-  float32x2_t __ret_284; \
-  __ret_284 = vfmlsl_high_f16(__s0_284, __s1_284, (float16x4_t) {vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284)}); \
-  __ret_284; \
+#define vfmlsl_laneq_high_f16(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \
+  float32x2_t __s0_803 = __p0_803; \
+  float16x4_t __s1_803 = __p1_803; \
+  float16x8_t __s2_803 = __p2_803; \
+  float32x2_t __ret_803; \
+  __ret_803 = vfmlsl_high_f16(__s0_803, __s1_803, (float16x4_t) {vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803)}); \
+  __ret_803; \
 })
 #else
-#define vfmlsl_laneq_high_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float32x2_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x8_t __s2_285 = __p2_285; \
-  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
-  float16x4_t __rev1_285;  __rev1_285 = __builtin_shufflevector(__s1_285, __s1_285, 3, 2, 1, 0); \
-  float16x8_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_285; \
-  __ret_285 = __noswap_vfmlsl_high_f16(__rev0_285, __rev1_285, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285)}); \
-  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
-  __ret_285; \
+#define vfmlsl_laneq_high_f16(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \
+  float32x2_t __s0_804 = __p0_804; \
+  float16x4_t __s1_804 = __p1_804; \
+  float16x8_t __s2_804 = __p2_804; \
+  float32x2_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \
+  float16x4_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 3, 2, 1, 0); \
+  float16x8_t __rev2_804;  __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_804; \
+  __ret_804 = __noswap_vfmlsl_high_f16(__rev0_804, __rev1_804, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804)}); \
+  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 1, 0); \
+  __ret_804; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float32x4_t __s0_286 = __p0_286; \
-  float16x8_t __s1_286 = __p1_286; \
-  float16x8_t __s2_286 = __p2_286; \
-  float32x4_t __ret_286; \
-  __ret_286 = vfmlslq_low_f16(__s0_286, __s1_286, (float16x8_t) {vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286)}); \
-  __ret_286; \
+#define vfmlslq_laneq_low_f16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \
+  float32x4_t __s0_805 = __p0_805; \
+  float16x8_t __s1_805 = __p1_805; \
+  float16x8_t __s2_805 = __p2_805; \
+  float32x4_t __ret_805; \
+  __ret_805 = vfmlslq_low_f16(__s0_805, __s1_805, (float16x8_t) {vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805)}); \
+  __ret_805; \
 })
 #else
-#define vfmlslq_laneq_low_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float32x4_t __s0_287 = __p0_287; \
-  float16x8_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float32x4_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 3, 2, 1, 0); \
-  float16x8_t __rev1_287;  __rev1_287 = __builtin_shufflevector(__s1_287, __s1_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_287; \
-  __ret_287 = __noswap_vfmlslq_low_f16(__rev0_287, __rev1_287, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287)}); \
-  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 3, 2, 1, 0); \
-  __ret_287; \
+#define vfmlslq_laneq_low_f16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \
+  float32x4_t __s0_806 = __p0_806; \
+  float16x8_t __s1_806 = __p1_806; \
+  float16x8_t __s2_806 = __p2_806; \
+  float32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
+  float16x8_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_806;  __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_806; \
+  __ret_806 = __noswap_vfmlslq_low_f16(__rev0_806, __rev1_806, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806)}); \
+  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 3, 2, 1, 0); \
+  __ret_806; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float32x2_t __s0_288 = __p0_288; \
-  float16x4_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float32x2_t __ret_288; \
-  __ret_288 = vfmlsl_low_f16(__s0_288, __s1_288, (float16x4_t) {vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288)}); \
-  __ret_288; \
+#define vfmlsl_laneq_low_f16(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \
+  float32x2_t __s0_807 = __p0_807; \
+  float16x4_t __s1_807 = __p1_807; \
+  float16x8_t __s2_807 = __p2_807; \
+  float32x2_t __ret_807; \
+  __ret_807 = vfmlsl_low_f16(__s0_807, __s1_807, (float16x4_t) {vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807)}); \
+  __ret_807; \
 })
 #else
-#define vfmlsl_laneq_low_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float32x2_t __s0_289 = __p0_289; \
-  float16x4_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float32x2_t __rev0_289;  __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 1, 0); \
-  float16x4_t __rev1_289;  __rev1_289 = __builtin_shufflevector(__s1_289, __s1_289, 3, 2, 1, 0); \
-  float16x8_t __rev2_289;  __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_289; \
-  __ret_289 = __noswap_vfmlsl_low_f16(__rev0_289, __rev1_289, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289)}); \
-  __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 1, 0); \
-  __ret_289; \
+#define vfmlsl_laneq_low_f16(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \
+  float32x2_t __s0_808 = __p0_808; \
+  float16x4_t __s1_808 = __p1_808; \
+  float16x8_t __s2_808 = __p2_808; \
+  float32x2_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 1, 0); \
+  float16x4_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \
+  float16x8_t __rev2_808;  __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_808; \
+  __ret_808 = __noswap_vfmlsl_low_f16(__rev0_808, __rev1_808, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808)}); \
+  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 1, 0); \
+  __ret_808; \
 })
 #endif
 
 #endif
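[editor's note] The hunk above is a mechanical renumbering of the FP16 FMLAL/FMLSL lane macros: the emitter's unique suffixes _272.._289 become _791.._808 with no change to the expansions. Each macro broadcasts one half-precision lane into a full vector and forwards it to the base widening multiply-accumulate intrinsic; the big-endian branch reverses element order with __builtin_shufflevector, calls the __noswap_ variant, and reverses the result back. A minimal usage sketch (illustrative only, assuming a target with the fp16fml extension; not part of the patch):

    /* acc[i] += (float)a[i] * (float)b[1], over the low half */
    float32x2_t acc = vdup_n_f32(0.0f);
    float16x4_t a   = vdup_n_f16((float16_t)2.0);
    float16x8_t b   = vdupq_n_f16((float16_t)3.0);
    acc = vfmlal_laneq_low_f16(acc, a, b, 1);   /* acc == {6.0f, 6.0f} */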
 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_290, __p1_290, __p2_290) __extension__ ({ \
-  float16_t __s0_290 = __p0_290; \
-  float16x4_t __s1_290 = __p1_290; \
-  float16_t __ret_290; \
-  __ret_290 = __s0_290 * vget_lane_f16(__s1_290, __p2_290); \
-  __ret_290; \
+#define vmulh_lane_f16(__p0_809, __p1_809, __p2_809) __extension__ ({ \
+  float16_t __s0_809 = __p0_809; \
+  float16x4_t __s1_809 = __p1_809; \
+  float16_t __ret_809; \
+  __ret_809 = __s0_809 * vget_lane_f16(__s1_809, __p2_809); \
+  __ret_809; \
 })
 #else
-#define vmulh_lane_f16(__p0_291, __p1_291, __p2_291) __extension__ ({ \
-  float16_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x4_t __rev1_291;  __rev1_291 = __builtin_shufflevector(__s1_291, __s1_291, 3, 2, 1, 0); \
-  float16_t __ret_291; \
-  __ret_291 = __s0_291 * __noswap_vget_lane_f16(__rev1_291, __p2_291); \
-  __ret_291; \
+#define vmulh_lane_f16(__p0_810, __p1_810, __p2_810) __extension__ ({ \
+  float16_t __s0_810 = __p0_810; \
+  float16x4_t __s1_810 = __p1_810; \
+  float16x4_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 3, 2, 1, 0); \
+  float16_t __ret_810; \
+  __ret_810 = __s0_810 * __noswap_vget_lane_f16(__rev1_810, __p2_810); \
+  __ret_810; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_292, __p1_292, __p2_292) __extension__ ({ \
-  float16_t __s0_292 = __p0_292; \
-  float16x8_t __s1_292 = __p1_292; \
-  float16_t __ret_292; \
-  __ret_292 = __s0_292 * vgetq_lane_f16(__s1_292, __p2_292); \
-  __ret_292; \
+#define vmulh_laneq_f16(__p0_811, __p1_811, __p2_811) __extension__ ({ \
+  float16_t __s0_811 = __p0_811; \
+  float16x8_t __s1_811 = __p1_811; \
+  float16_t __ret_811; \
+  __ret_811 = __s0_811 * vgetq_lane_f16(__s1_811, __p2_811); \
+  __ret_811; \
 })
 #else
-#define vmulh_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __rev1_293;  __rev1_293 = __builtin_shufflevector(__s1_293, __s1_293, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_293; \
-  __ret_293 = __s0_293 * __noswap_vgetq_lane_f16(__rev1_293, __p2_293); \
-  __ret_293; \
+#define vmulh_laneq_f16(__p0_812, __p1_812, __p2_812) __extension__ ({ \
+  float16_t __s0_812 = __p0_812; \
+  float16x8_t __s1_812 = __p1_812; \
+  float16x8_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_812; \
+  __ret_812 = __s0_812 * __noswap_vgetq_lane_f16(__rev1_812, __p2_812); \
+  __ret_812; \
+})
+#endif
+
+#endif
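[editor's note] vmulh_lane_f16/vmulh_laneq_f16 above are the simplest macros in this family: a scalar float16_t times one extracted lane, e.g. vmulh_lane_f16(x, v, 2) expands to x * vget_lane_f16(v, 2). The big-endian branch only reverses the source vector before the extract; no result shuffle is needed because the result is scalar.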
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+#ifdef __LITTLE_ENDIAN__
+#define vsudotq_lane_s32(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \
+  int32x4_t __s0_813 = __p0_813; \
+  int8x16_t __s1_813 = __p1_813; \
+  uint8x8_t __s2_813 = __p2_813; \
+  int32x4_t __ret_813; \
+  uint8x8_t __reint_813 = __s2_813; \
+  __ret_813 = vusdotq_s32(__s0_813, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_813, __p3_813)), __s1_813); \
+  __ret_813; \
+})
+#else
+#define vsudotq_lane_s32(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \
+  int32x4_t __s0_814 = __p0_814; \
+  int8x16_t __s1_814 = __p1_814; \
+  uint8x8_t __s2_814 = __p2_814; \
+  int32x4_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \
+  int8x16_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_814;  __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_814; \
+  uint8x8_t __reint_814 = __rev2_814; \
+  __ret_814 = __noswap_vusdotq_s32(__rev0_814, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_814, __p3_814)), __rev1_814); \
+  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
+  __ret_814; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsudot_lane_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \
+  int32x2_t __s0_815 = __p0_815; \
+  int8x8_t __s1_815 = __p1_815; \
+  uint8x8_t __s2_815 = __p2_815; \
+  int32x2_t __ret_815; \
+  uint8x8_t __reint_815 = __s2_815; \
+  __ret_815 = vusdot_s32(__s0_815, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_815, __p3_815)), __s1_815); \
+  __ret_815; \
+})
+#else
+#define vsudot_lane_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \
+  int32x2_t __s0_816 = __p0_816; \
+  int8x8_t __s1_816 = __p1_816; \
+  uint8x8_t __s2_816 = __p2_816; \
+  int32x2_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \
+  int8x8_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_816;  __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_816; \
+  uint8x8_t __reint_816 = __rev2_816; \
+  __ret_816 = __noswap_vusdot_s32(__rev0_816, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_816, __p3_816)), __rev1_816); \
+  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \
+  __ret_816; \
 })
 #endif
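[editor's note] Unlike the surrounding renumbering, the _813.._816 block is genuinely new: it adds the __ARM_FEATURE_MATMUL_INT8 (I8MM) mixed-sign dot products. There is no separate builtin for the signed-by-unsigned form; vsudotq_lane_s32 and vsudot_lane_s32 splat the selected group of four u8 lanes and call the unsigned-by-signed vusdotq_s32/vusdot_s32 with the two vector operands swapped, which is valid because each 8-bit product commutes. Per 32-bit output lane (sketch, not part of the patch; splat_u8x4 is shorthand for the splat_lane_s32 reinterpret dance in the macro):

    /* ret[i] = acc[i] + sum over k=0..3 of (int8_t)a[4*i+k] * (uint8_t)b[4*lane+k]
     * hence vsudot_lane_s32(acc, a, b, lane) == vusdot_s32(acc, splat_u8x4(b, lane), a)
     */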
 
@@ -61961,86 +66049,86 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
-  int32_t __s0_294 = __p0_294; \
-  int32_t __s1_294 = __p1_294; \
-  int32x2_t __s2_294 = __p2_294; \
-  int32_t __ret_294; \
-  __ret_294 = vqadds_s32(__s0_294, vqrdmulhs_s32(__s1_294, vget_lane_s32(__s2_294, __p3_294))); \
-  __ret_294; \
+#define vqrdmlahs_lane_s32(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \
+  int32_t __s0_817 = __p0_817; \
+  int32_t __s1_817 = __p1_817; \
+  int32x2_t __s2_817 = __p2_817; \
+  int32_t __ret_817; \
+  __ret_817 = vqadds_s32(__s0_817, vqrdmulhs_s32(__s1_817, vget_lane_s32(__s2_817, __p3_817))); \
+  __ret_817; \
 })
 #else
-#define vqrdmlahs_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
-  int32_t __s0_295 = __p0_295; \
-  int32_t __s1_295 = __p1_295; \
-  int32x2_t __s2_295 = __p2_295; \
-  int32x2_t __rev2_295;  __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \
-  int32_t __ret_295; \
-  __ret_295 = vqadds_s32(__s0_295, vqrdmulhs_s32(__s1_295, __noswap_vget_lane_s32(__rev2_295, __p3_295))); \
-  __ret_295; \
+#define vqrdmlahs_lane_s32(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \
+  int32_t __s0_818 = __p0_818; \
+  int32_t __s1_818 = __p1_818; \
+  int32x2_t __s2_818 = __p2_818; \
+  int32x2_t __rev2_818;  __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 1, 0); \
+  int32_t __ret_818; \
+  __ret_818 = vqadds_s32(__s0_818, vqrdmulhs_s32(__s1_818, __noswap_vget_lane_s32(__rev2_818, __p3_818))); \
+  __ret_818; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
-  int16_t __s0_296 = __p0_296; \
-  int16_t __s1_296 = __p1_296; \
-  int16x4_t __s2_296 = __p2_296; \
-  int16_t __ret_296; \
-  __ret_296 = vqaddh_s16(__s0_296, vqrdmulhh_s16(__s1_296, vget_lane_s16(__s2_296, __p3_296))); \
-  __ret_296; \
+#define vqrdmlahh_lane_s16(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \
+  int16_t __s0_819 = __p0_819; \
+  int16_t __s1_819 = __p1_819; \
+  int16x4_t __s2_819 = __p2_819; \
+  int16_t __ret_819; \
+  __ret_819 = vqaddh_s16(__s0_819, vqrdmulhh_s16(__s1_819, vget_lane_s16(__s2_819, __p3_819))); \
+  __ret_819; \
 })
 #else
-#define vqrdmlahh_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
-  int16_t __s0_297 = __p0_297; \
-  int16_t __s1_297 = __p1_297; \
-  int16x4_t __s2_297 = __p2_297; \
-  int16x4_t __rev2_297;  __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, 3, 2, 1, 0); \
-  int16_t __ret_297; \
-  __ret_297 = vqaddh_s16(__s0_297, vqrdmulhh_s16(__s1_297, __noswap_vget_lane_s16(__rev2_297, __p3_297))); \
-  __ret_297; \
+#define vqrdmlahh_lane_s16(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \
+  int16_t __s0_820 = __p0_820; \
+  int16_t __s1_820 = __p1_820; \
+  int16x4_t __s2_820 = __p2_820; \
+  int16x4_t __rev2_820;  __rev2_820 = __builtin_shufflevector(__s2_820, __s2_820, 3, 2, 1, 0); \
+  int16_t __ret_820; \
+  __ret_820 = vqaddh_s16(__s0_820, vqrdmulhh_s16(__s1_820, __noswap_vget_lane_s16(__rev2_820, __p3_820))); \
+  __ret_820; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
-  int32_t __s0_298 = __p0_298; \
-  int32_t __s1_298 = __p1_298; \
-  int32x4_t __s2_298 = __p2_298; \
-  int32_t __ret_298; \
-  __ret_298 = vqadds_s32(__s0_298, vqrdmulhs_s32(__s1_298, vgetq_lane_s32(__s2_298, __p3_298))); \
-  __ret_298; \
+#define vqrdmlahs_laneq_s32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \
+  int32_t __s0_821 = __p0_821; \
+  int32_t __s1_821 = __p1_821; \
+  int32x4_t __s2_821 = __p2_821; \
+  int32_t __ret_821; \
+  __ret_821 = vqadds_s32(__s0_821, vqrdmulhs_s32(__s1_821, vgetq_lane_s32(__s2_821, __p3_821))); \
+  __ret_821; \
 })
 #else
-#define vqrdmlahs_laneq_s32(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
-  int32_t __s0_299 = __p0_299; \
-  int32_t __s1_299 = __p1_299; \
-  int32x4_t __s2_299 = __p2_299; \
-  int32x4_t __rev2_299;  __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \
-  int32_t __ret_299; \
-  __ret_299 = vqadds_s32(__s0_299, vqrdmulhs_s32(__s1_299, __noswap_vgetq_lane_s32(__rev2_299, __p3_299))); \
-  __ret_299; \
+#define vqrdmlahs_laneq_s32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \
+  int32_t __s0_822 = __p0_822; \
+  int32_t __s1_822 = __p1_822; \
+  int32x4_t __s2_822 = __p2_822; \
+  int32x4_t __rev2_822;  __rev2_822 = __builtin_shufflevector(__s2_822, __s2_822, 3, 2, 1, 0); \
+  int32_t __ret_822; \
+  __ret_822 = vqadds_s32(__s0_822, vqrdmulhs_s32(__s1_822, __noswap_vgetq_lane_s32(__rev2_822, __p3_822))); \
+  __ret_822; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
-  int16_t __s0_300 = __p0_300; \
-  int16_t __s1_300 = __p1_300; \
-  int16x8_t __s2_300 = __p2_300; \
-  int16_t __ret_300; \
-  __ret_300 = vqaddh_s16(__s0_300, vqrdmulhh_s16(__s1_300, vgetq_lane_s16(__s2_300, __p3_300))); \
-  __ret_300; \
+#define vqrdmlahh_laneq_s16(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \
+  int16_t __s0_823 = __p0_823; \
+  int16_t __s1_823 = __p1_823; \
+  int16x8_t __s2_823 = __p2_823; \
+  int16_t __ret_823; \
+  __ret_823 = vqaddh_s16(__s0_823, vqrdmulhh_s16(__s1_823, vgetq_lane_s16(__s2_823, __p3_823))); \
+  __ret_823; \
 })
 #else
-#define vqrdmlahh_laneq_s16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
-  int16_t __s0_301 = __p0_301; \
-  int16_t __s1_301 = __p1_301; \
-  int16x8_t __s2_301 = __p2_301; \
-  int16x8_t __rev2_301;  __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_301; \
-  __ret_301 = vqaddh_s16(__s0_301, vqrdmulhh_s16(__s1_301, __noswap_vgetq_lane_s16(__rev2_301, __p3_301))); \
-  __ret_301; \
+#define vqrdmlahh_laneq_s16(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \
+  int16_t __s0_824 = __p0_824; \
+  int16_t __s1_824 = __p1_824; \
+  int16x8_t __s2_824 = __p2_824; \
+  int16x8_t __rev2_824;  __rev2_824 = __builtin_shufflevector(__s2_824, __s2_824, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_824; \
+  __ret_824 = vqaddh_s16(__s0_824, vqrdmulhh_s16(__s1_824, __noswap_vgetq_lane_s16(__rev2_824, __p3_824))); \
+  __ret_824; \
 })
 #endif
 
@@ -62055,86 +66143,86 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
-  int32_t __s0_302 = __p0_302; \
-  int32_t __s1_302 = __p1_302; \
-  int32x2_t __s2_302 = __p2_302; \
-  int32_t __ret_302; \
-  __ret_302 = vqsubs_s32(__s0_302, vqrdmulhs_s32(__s1_302, vget_lane_s32(__s2_302, __p3_302))); \
-  __ret_302; \
+#define vqrdmlshs_lane_s32(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \
+  int32_t __s0_825 = __p0_825; \
+  int32_t __s1_825 = __p1_825; \
+  int32x2_t __s2_825 = __p2_825; \
+  int32_t __ret_825; \
+  __ret_825 = vqsubs_s32(__s0_825, vqrdmulhs_s32(__s1_825, vget_lane_s32(__s2_825, __p3_825))); \
+  __ret_825; \
 })
 #else
-#define vqrdmlshs_lane_s32(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
-  int32_t __s0_303 = __p0_303; \
-  int32_t __s1_303 = __p1_303; \
-  int32x2_t __s2_303 = __p2_303; \
-  int32x2_t __rev2_303;  __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 1, 0); \
-  int32_t __ret_303; \
-  __ret_303 = vqsubs_s32(__s0_303, vqrdmulhs_s32(__s1_303, __noswap_vget_lane_s32(__rev2_303, __p3_303))); \
-  __ret_303; \
+#define vqrdmlshs_lane_s32(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \
+  int32_t __s0_826 = __p0_826; \
+  int32_t __s1_826 = __p1_826; \
+  int32x2_t __s2_826 = __p2_826; \
+  int32x2_t __rev2_826;  __rev2_826 = __builtin_shufflevector(__s2_826, __s2_826, 1, 0); \
+  int32_t __ret_826; \
+  __ret_826 = vqsubs_s32(__s0_826, vqrdmulhs_s32(__s1_826, __noswap_vget_lane_s32(__rev2_826, __p3_826))); \
+  __ret_826; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
-  int16_t __s0_304 = __p0_304; \
-  int16_t __s1_304 = __p1_304; \
-  int16x4_t __s2_304 = __p2_304; \
-  int16_t __ret_304; \
-  __ret_304 = vqsubh_s16(__s0_304, vqrdmulhh_s16(__s1_304, vget_lane_s16(__s2_304, __p3_304))); \
-  __ret_304; \
+#define vqrdmlshh_lane_s16(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
+  int16_t __s0_827 = __p0_827; \
+  int16_t __s1_827 = __p1_827; \
+  int16x4_t __s2_827 = __p2_827; \
+  int16_t __ret_827; \
+  __ret_827 = vqsubh_s16(__s0_827, vqrdmulhh_s16(__s1_827, vget_lane_s16(__s2_827, __p3_827))); \
+  __ret_827; \
 })
 #else
-#define vqrdmlshh_lane_s16(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int16_t __s0_305 = __p0_305; \
-  int16_t __s1_305 = __p1_305; \
-  int16x4_t __s2_305 = __p2_305; \
-  int16x4_t __rev2_305;  __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 3, 2, 1, 0); \
-  int16_t __ret_305; \
-  __ret_305 = vqsubh_s16(__s0_305, vqrdmulhh_s16(__s1_305, __noswap_vget_lane_s16(__rev2_305, __p3_305))); \
-  __ret_305; \
+#define vqrdmlshh_lane_s16(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
+  int16_t __s0_828 = __p0_828; \
+  int16_t __s1_828 = __p1_828; \
+  int16x4_t __s2_828 = __p2_828; \
+  int16x4_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 3, 2, 1, 0); \
+  int16_t __ret_828; \
+  __ret_828 = vqsubh_s16(__s0_828, vqrdmulhh_s16(__s1_828, __noswap_vget_lane_s16(__rev2_828, __p3_828))); \
+  __ret_828; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32_t __s0_306 = __p0_306; \
-  int32_t __s1_306 = __p1_306; \
-  int32x4_t __s2_306 = __p2_306; \
-  int32_t __ret_306; \
-  __ret_306 = vqsubs_s32(__s0_306, vqrdmulhs_s32(__s1_306, vgetq_lane_s32(__s2_306, __p3_306))); \
-  __ret_306; \
+#define vqrdmlshs_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
+  int32_t __s0_829 = __p0_829; \
+  int32_t __s1_829 = __p1_829; \
+  int32x4_t __s2_829 = __p2_829; \
+  int32_t __ret_829; \
+  __ret_829 = vqsubs_s32(__s0_829, vqrdmulhs_s32(__s1_829, vgetq_lane_s32(__s2_829, __p3_829))); \
+  __ret_829; \
 })
 #else
-#define vqrdmlshs_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32_t __s0_307 = __p0_307; \
-  int32_t __s1_307 = __p1_307; \
-  int32x4_t __s2_307 = __p2_307; \
-  int32x4_t __rev2_307;  __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 3, 2, 1, 0); \
-  int32_t __ret_307; \
-  __ret_307 = vqsubs_s32(__s0_307, vqrdmulhs_s32(__s1_307, __noswap_vgetq_lane_s32(__rev2_307, __p3_307))); \
-  __ret_307; \
+#define vqrdmlshs_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
+  int32_t __s0_830 = __p0_830; \
+  int32_t __s1_830 = __p1_830; \
+  int32x4_t __s2_830 = __p2_830; \
+  int32x4_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 3, 2, 1, 0); \
+  int32_t __ret_830; \
+  __ret_830 = vqsubs_s32(__s0_830, vqrdmulhs_s32(__s1_830, __noswap_vgetq_lane_s32(__rev2_830, __p3_830))); \
+  __ret_830; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int16_t __s0_308 = __p0_308; \
-  int16_t __s1_308 = __p1_308; \
-  int16x8_t __s2_308 = __p2_308; \
-  int16_t __ret_308; \
-  __ret_308 = vqsubh_s16(__s0_308, vqrdmulhh_s16(__s1_308, vgetq_lane_s16(__s2_308, __p3_308))); \
-  __ret_308; \
+#define vqrdmlshh_laneq_s16(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
+  int16_t __s0_831 = __p0_831; \
+  int16_t __s1_831 = __p1_831; \
+  int16x8_t __s2_831 = __p2_831; \
+  int16_t __ret_831; \
+  __ret_831 = vqsubh_s16(__s0_831, vqrdmulhh_s16(__s1_831, vgetq_lane_s16(__s2_831, __p3_831))); \
+  __ret_831; \
 })
 #else
-#define vqrdmlshh_laneq_s16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int16_t __s0_309 = __p0_309; \
-  int16_t __s1_309 = __p1_309; \
-  int16x8_t __s2_309 = __p2_309; \
-  int16x8_t __rev2_309;  __rev2_309 = __builtin_shufflevector(__s2_309, __s2_309, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_309; \
-  __ret_309 = vqsubh_s16(__s0_309, vqrdmulhh_s16(__s1_309, __noswap_vgetq_lane_s16(__rev2_309, __p3_309))); \
-  __ret_309; \
+#define vqrdmlshh_laneq_s16(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
+  int16_t __s0_832 = __p0_832; \
+  int16_t __s1_832 = __p1_832; \
+  int16x8_t __s2_832 = __p2_832; \
+  int16x8_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_832; \
+  __ret_832 = vqsubh_s16(__s0_832, vqrdmulhh_s16(__s1_832, __noswap_vgetq_lane_s16(__rev2_832, __p3_832))); \
+  __ret_832; \
 })
 #endif
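[editor's note] The vqrdmlah*/vqrdmlsh* scalar lane macros renumbered above compose two existing scalar intrinsics instead of mapping to one builtin: a saturating rounding doubling multiply-high, then a saturating add (vqadds_s32/vqaddh_s16) or subtract (vqsubs_s32/vqsubh_s16). For 32-bit operands, vqrdmulhs_s32(x, y) computes sat((2*x*y + 2^31) >> 32), so, as a sketch (not part of the patch, with int32x2_t v):

    /* with b == v[1] == 1 << 30, vqrdmulhs_s32(b, v[1]) == 1 << 29 */
    int32_t r = vqrdmlahs_lane_s32(a, b, v, 1);
    /* == vqadds_s32(a, vqrdmulhs_s32(b, vget_lane_s32(v, 1))) */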
 
@@ -62447,136 +66535,136 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  poly64x2_t __s0_310 = __p0_310; \
-  poly64x1_t __s2_310 = __p2_310; \
-  poly64x2_t __ret_310; \
-  __ret_310 = vsetq_lane_p64(vget_lane_p64(__s2_310, __p3_310), __s0_310, __p1_310); \
-  __ret_310; \
+#define vcopyq_lane_p64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
+  poly64x2_t __s0_833 = __p0_833; \
+  poly64x1_t __s2_833 = __p2_833; \
+  poly64x2_t __ret_833; \
+  __ret_833 = vsetq_lane_p64(vget_lane_p64(__s2_833, __p3_833), __s0_833, __p1_833); \
+  __ret_833; \
 })
 #else
-#define vcopyq_lane_p64(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  poly64x2_t __s0_311 = __p0_311; \
-  poly64x1_t __s2_311 = __p2_311; \
-  poly64x2_t __rev0_311;  __rev0_311 = __builtin_shufflevector(__s0_311, __s0_311, 1, 0); \
-  poly64x2_t __ret_311; \
-  __ret_311 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_311, __p3_311), __rev0_311, __p1_311); \
-  __ret_311 = __builtin_shufflevector(__ret_311, __ret_311, 1, 0); \
-  __ret_311; \
+#define vcopyq_lane_p64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
+  poly64x2_t __s0_834 = __p0_834; \
+  poly64x1_t __s2_834 = __p2_834; \
+  poly64x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
+  poly64x2_t __ret_834; \
+  __ret_834 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_834, __p3_834), __rev0_834, __p1_834); \
+  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
+  __ret_834; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  float64x2_t __s0_312 = __p0_312; \
-  float64x1_t __s2_312 = __p2_312; \
-  float64x2_t __ret_312; \
-  __ret_312 = vsetq_lane_f64(vget_lane_f64(__s2_312, __p3_312), __s0_312, __p1_312); \
-  __ret_312; \
+#define vcopyq_lane_f64(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \
+  float64x2_t __s0_835 = __p0_835; \
+  float64x1_t __s2_835 = __p2_835; \
+  float64x2_t __ret_835; \
+  __ret_835 = vsetq_lane_f64(vget_lane_f64(__s2_835, __p3_835), __s0_835, __p1_835); \
+  __ret_835; \
 })
 #else
-#define vcopyq_lane_f64(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  float64x2_t __s0_313 = __p0_313; \
-  float64x1_t __s2_313 = __p2_313; \
-  float64x2_t __rev0_313;  __rev0_313 = __builtin_shufflevector(__s0_313, __s0_313, 1, 0); \
-  float64x2_t __ret_313; \
-  __ret_313 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_313, __p3_313), __rev0_313, __p1_313); \
-  __ret_313 = __builtin_shufflevector(__ret_313, __ret_313, 1, 0); \
-  __ret_313; \
+#define vcopyq_lane_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \
+  float64x2_t __s0_836 = __p0_836; \
+  float64x1_t __s2_836 = __p2_836; \
+  float64x2_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 1, 0); \
+  float64x2_t __ret_836; \
+  __ret_836 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_836, __p3_836), __rev0_836, __p1_836); \
+  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 1, 0); \
+  __ret_836; \
 })
 #endif
 
-#define vcopy_lane_p64(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  poly64x1_t __s0_314 = __p0_314; \
-  poly64x1_t __s2_314 = __p2_314; \
-  poly64x1_t __ret_314; \
-  __ret_314 = vset_lane_p64(vget_lane_p64(__s2_314, __p3_314), __s0_314, __p1_314); \
-  __ret_314; \
+#define vcopy_lane_p64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \
+  poly64x1_t __s0_837 = __p0_837; \
+  poly64x1_t __s2_837 = __p2_837; \
+  poly64x1_t __ret_837; \
+  __ret_837 = vset_lane_p64(vget_lane_p64(__s2_837, __p3_837), __s0_837, __p1_837); \
+  __ret_837; \
 })
-#define vcopy_lane_f64(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  float64x1_t __s0_315 = __p0_315; \
-  float64x1_t __s2_315 = __p2_315; \
-  float64x1_t __ret_315; \
-  __ret_315 = vset_lane_f64(vget_lane_f64(__s2_315, __p3_315), __s0_315, __p1_315); \
-  __ret_315; \
+#define vcopy_lane_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \
+  float64x1_t __s0_838 = __p0_838; \
+  float64x1_t __s2_838 = __p2_838; \
+  float64x1_t __ret_838; \
+  __ret_838 = vset_lane_f64(vget_lane_f64(__s2_838, __p3_838), __s0_838, __p1_838); \
+  __ret_838; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  poly64x2_t __s0_316 = __p0_316; \
-  poly64x2_t __s2_316 = __p2_316; \
-  poly64x2_t __ret_316; \
-  __ret_316 = vsetq_lane_p64(vgetq_lane_p64(__s2_316, __p3_316), __s0_316, __p1_316); \
-  __ret_316; \
+#define vcopyq_laneq_p64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \
+  poly64x2_t __s0_839 = __p0_839; \
+  poly64x2_t __s2_839 = __p2_839; \
+  poly64x2_t __ret_839; \
+  __ret_839 = vsetq_lane_p64(vgetq_lane_p64(__s2_839, __p3_839), __s0_839, __p1_839); \
+  __ret_839; \
 })
 #else
-#define vcopyq_laneq_p64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  poly64x2_t __s0_317 = __p0_317; \
-  poly64x2_t __s2_317 = __p2_317; \
-  poly64x2_t __rev0_317;  __rev0_317 = __builtin_shufflevector(__s0_317, __s0_317, 1, 0); \
-  poly64x2_t __rev2_317;  __rev2_317 = __builtin_shufflevector(__s2_317, __s2_317, 1, 0); \
-  poly64x2_t __ret_317; \
-  __ret_317 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_317, __p3_317), __rev0_317, __p1_317); \
-  __ret_317 = __builtin_shufflevector(__ret_317, __ret_317, 1, 0); \
-  __ret_317; \
+#define vcopyq_laneq_p64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \
+  poly64x2_t __s0_840 = __p0_840; \
+  poly64x2_t __s2_840 = __p2_840; \
+  poly64x2_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \
+  poly64x2_t __rev2_840;  __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 1, 0); \
+  poly64x2_t __ret_840; \
+  __ret_840 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_840, __p3_840), __rev0_840, __p1_840); \
+  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 1, 0); \
+  __ret_840; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  float64x2_t __s0_318 = __p0_318; \
-  float64x2_t __s2_318 = __p2_318; \
-  float64x2_t __ret_318; \
-  __ret_318 = vsetq_lane_f64(vgetq_lane_f64(__s2_318, __p3_318), __s0_318, __p1_318); \
-  __ret_318; \
+#define vcopyq_laneq_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
+  float64x2_t __s0_841 = __p0_841; \
+  float64x2_t __s2_841 = __p2_841; \
+  float64x2_t __ret_841; \
+  __ret_841 = vsetq_lane_f64(vgetq_lane_f64(__s2_841, __p3_841), __s0_841, __p1_841); \
+  __ret_841; \
 })
 #else
-#define vcopyq_laneq_f64(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  float64x2_t __s0_319 = __p0_319; \
-  float64x2_t __s2_319 = __p2_319; \
-  float64x2_t __rev0_319;  __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 1, 0); \
-  float64x2_t __rev2_319;  __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 1, 0); \
-  float64x2_t __ret_319; \
-  __ret_319 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_319, __p3_319), __rev0_319, __p1_319); \
-  __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 1, 0); \
-  __ret_319; \
+#define vcopyq_laneq_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
+  float64x2_t __s0_842 = __p0_842; \
+  float64x2_t __s2_842 = __p2_842; \
+  float64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
+  float64x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
+  float64x2_t __ret_842; \
+  __ret_842 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_842, __p3_842), __rev0_842, __p1_842); \
+  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
+  __ret_842; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  poly64x1_t __s0_320 = __p0_320; \
-  poly64x2_t __s2_320 = __p2_320; \
-  poly64x1_t __ret_320; \
-  __ret_320 = vset_lane_p64(vgetq_lane_p64(__s2_320, __p3_320), __s0_320, __p1_320); \
-  __ret_320; \
+#define vcopy_laneq_p64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
+  poly64x1_t __s0_843 = __p0_843; \
+  poly64x2_t __s2_843 = __p2_843; \
+  poly64x1_t __ret_843; \
+  __ret_843 = vset_lane_p64(vgetq_lane_p64(__s2_843, __p3_843), __s0_843, __p1_843); \
+  __ret_843; \
 })
 #else
-#define vcopy_laneq_p64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  poly64x1_t __s0_321 = __p0_321; \
-  poly64x2_t __s2_321 = __p2_321; \
-  poly64x2_t __rev2_321;  __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 1, 0); \
-  poly64x1_t __ret_321; \
-  __ret_321 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_321, __p3_321), __s0_321, __p1_321); \
-  __ret_321; \
+#define vcopy_laneq_p64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
+  poly64x1_t __s0_844 = __p0_844; \
+  poly64x2_t __s2_844 = __p2_844; \
+  poly64x2_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \
+  poly64x1_t __ret_844; \
+  __ret_844 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_844, __p3_844), __s0_844, __p1_844); \
+  __ret_844; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  float64x1_t __s0_322 = __p0_322; \
-  float64x2_t __s2_322 = __p2_322; \
-  float64x1_t __ret_322; \
-  __ret_322 = vset_lane_f64(vgetq_lane_f64(__s2_322, __p3_322), __s0_322, __p1_322); \
-  __ret_322; \
+#define vcopy_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
+  float64x1_t __s0_845 = __p0_845; \
+  float64x2_t __s2_845 = __p2_845; \
+  float64x1_t __ret_845; \
+  __ret_845 = vset_lane_f64(vgetq_lane_f64(__s2_845, __p3_845), __s0_845, __p1_845); \
+  __ret_845; \
 })
 #else
-#define vcopy_laneq_f64(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  float64x1_t __s0_323 = __p0_323; \
-  float64x2_t __s2_323 = __p2_323; \
-  float64x2_t __rev2_323;  __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 1, 0); \
-  float64x1_t __ret_323; \
-  __ret_323 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_323, __p3_323), __s0_323, __p1_323); \
-  __ret_323; \
+#define vcopy_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
+  float64x1_t __s0_846 = __p0_846; \
+  float64x2_t __s2_846 = __p2_846; \
+  float64x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
+  float64x1_t __ret_846; \
+  __ret_846 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_846, __p3_846), __s0_846, __p1_846); \
+  __ret_846; \
 })
 #endif
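[editor's note] The vcopy family above is sugar over a get/set pair: each macro reads one lane of the third argument and writes it into one lane of the first. For example (sketch, not part of the patch):

    /* copy s[1] into d[0] */
    float64x2_t r = vcopyq_laneq_f64(d, 0, s, 1);
    /* == vsetq_lane_f64(vgetq_lane_f64(s, 1), d, 0) */

The one-element vcopy_lane_p64/vcopy_lane_f64 forms sit outside the #ifdef __LITTLE_ENDIAN__ split because a single-lane vector has nothing to byte-swap.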
 
@@ -62932,38 +67020,38 @@
 }
 #endif
 
-#define vmulx_lane_f64(__p0_324, __p1_324, __p2_324) __extension__ ({ \
-  float64x1_t __s0_324 = __p0_324; \
-  float64x1_t __s1_324 = __p1_324; \
-  float64x1_t __ret_324; \
-  float64_t __x_324 = vget_lane_f64(__s0_324, 0); \
-  float64_t __y_324 = vget_lane_f64(__s1_324, __p2_324); \
-  float64_t __z_324 = vmulxd_f64(__x_324, __y_324); \
-  __ret_324 = vset_lane_f64(__z_324, __s0_324, __p2_324); \
-  __ret_324; \
+#define vmulx_lane_f64(__p0_847, __p1_847, __p2_847) __extension__ ({ \
+  float64x1_t __s0_847 = __p0_847; \
+  float64x1_t __s1_847 = __p1_847; \
+  float64x1_t __ret_847; \
+  float64_t __x_847 = vget_lane_f64(__s0_847, 0); \
+  float64_t __y_847 = vget_lane_f64(__s1_847, __p2_847); \
+  float64_t __z_847 = vmulxd_f64(__x_847, __y_847); \
+  __ret_847 = vset_lane_f64(__z_847, __s0_847, __p2_847); \
+  __ret_847; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_325, __p1_325, __p2_325) __extension__ ({ \
-  float64x1_t __s0_325 = __p0_325; \
-  float64x2_t __s1_325 = __p1_325; \
-  float64x1_t __ret_325; \
-  float64_t __x_325 = vget_lane_f64(__s0_325, 0); \
-  float64_t __y_325 = vgetq_lane_f64(__s1_325, __p2_325); \
-  float64_t __z_325 = vmulxd_f64(__x_325, __y_325); \
-  __ret_325 = vset_lane_f64(__z_325, __s0_325, 0); \
-  __ret_325; \
+#define vmulx_laneq_f64(__p0_848, __p1_848, __p2_848) __extension__ ({ \
+  float64x1_t __s0_848 = __p0_848; \
+  float64x2_t __s1_848 = __p1_848; \
+  float64x1_t __ret_848; \
+  float64_t __x_848 = vget_lane_f64(__s0_848, 0); \
+  float64_t __y_848 = vgetq_lane_f64(__s1_848, __p2_848); \
+  float64_t __z_848 = vmulxd_f64(__x_848, __y_848); \
+  __ret_848 = vset_lane_f64(__z_848, __s0_848, 0); \
+  __ret_848; \
 })
 #else
-#define vmulx_laneq_f64(__p0_326, __p1_326, __p2_326) __extension__ ({ \
-  float64x1_t __s0_326 = __p0_326; \
-  float64x2_t __s1_326 = __p1_326; \
-  float64x2_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 1, 0); \
-  float64x1_t __ret_326; \
-  float64_t __x_326 = vget_lane_f64(__s0_326, 0); \
-  float64_t __y_326 = __noswap_vgetq_lane_f64(__rev1_326, __p2_326); \
-  float64_t __z_326 = vmulxd_f64(__x_326, __y_326); \
-  __ret_326 = vset_lane_f64(__z_326, __s0_326, 0); \
-  __ret_326; \
+#define vmulx_laneq_f64(__p0_849, __p1_849, __p2_849) __extension__ ({ \
+  float64x1_t __s0_849 = __p0_849; \
+  float64x2_t __s1_849 = __p1_849; \
+  float64x2_t __rev1_849;  __rev1_849 = __builtin_shufflevector(__s1_849, __s1_849, 1, 0); \
+  float64x1_t __ret_849; \
+  float64_t __x_849 = vget_lane_f64(__s0_849, 0); \
+  float64_t __y_849 = __noswap_vgetq_lane_f64(__rev1_849, __p2_849); \
+  float64_t __z_849 = vmulxd_f64(__x_849, __y_849); \
+  __ret_849 = vset_lane_f64(__z_849, __s0_849, 0); \
+  __ret_849; \
 })
 #endif
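[editor's note] vmulxd_f64 underlying the macros above is scalar FMULX: an ordinary multiply except that (+/-0) * (+/-inf) returns +/-2.0 rather than NaN, which is what the reciprocal-estimate iteration sequences rely on. One quirk worth flagging: vmulx_lane_f64 writes the product back with vset_lane_f64(__z, __s0, __p2) while the laneq variants hard-code lane 0; since __s0 is a one-element float64x1_t, the only valid __p2 there is 0, so the two spellings agree.

    /* sketch (not part of the patch):
     * vmulx_lane_f64(x, y, 0) == vset_lane_f64(vmulxd_f64(x[0], y[0]), x, 0)
     */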
 
@@ -63219,4 +67307,6 @@
 
 #undef __ai
 
+#endif /* if !defined(__ARM_NEON) */
+#endif /* ifndef __ARM_FP */
 #endif /* __ARM_NEON_H */
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_sve.h b/darwin-x86/lib64/clang/11.0.5/include/arm_sve.h
new file mode 100644
index 0000000..1035d41
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/arm_sve.h
@@ -0,0 +1,18148 @@
+/*===---- arm_sve.h - ARM SVE intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_SVE_H
+#define __ARM_SVE_H
+
+#if !defined(__ARM_FEATURE_SVE)
+#error "SVE support not enabled"
+#else
+
+#if !defined(__LITTLE_ENDIAN__)
+#error "Big endian is currently not supported for arm_sve.h"
+#endif
+#include <stdint.h>
+
+#ifdef  __cplusplus
+extern "C" {
+#else
+#include <stdbool.h>
+#endif
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+typedef __SVInt8_t svint8_t;
+typedef __SVInt16_t svint16_t;
+typedef __SVInt32_t svint32_t;
+typedef __SVInt64_t svint64_t;
+typedef __SVUint8_t svuint8_t;
+typedef __SVUint16_t svuint16_t;
+typedef __SVUint32_t svuint32_t;
+typedef __SVUint64_t svuint64_t;
+typedef __SVFloat16_t svfloat16_t;
+
+#if defined(__ARM_FEATURE_SVE_BF16) && !defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+#error "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when __ARM_FEATURE_SVE_BF16 is defined"
+#endif
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+typedef __SVBFloat16_t svbfloat16_t;
+#endif
+
+#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+#include <arm_bf16.h>
+typedef __bf16 bfloat16_t;
+#endif
+
+typedef __SVFloat32_t svfloat32_t;
+typedef __SVFloat64_t svfloat64_t;
+typedef __clang_svint8x2_t svint8x2_t;
+typedef __clang_svint16x2_t svint16x2_t;
+typedef __clang_svint32x2_t svint32x2_t;
+typedef __clang_svint64x2_t svint64x2_t;
+typedef __clang_svuint8x2_t svuint8x2_t;
+typedef __clang_svuint16x2_t svuint16x2_t;
+typedef __clang_svuint32x2_t svuint32x2_t;
+typedef __clang_svuint64x2_t svuint64x2_t;
+typedef __clang_svfloat16x2_t svfloat16x2_t;
+typedef __clang_svfloat32x2_t svfloat32x2_t;
+typedef __clang_svfloat64x2_t svfloat64x2_t;
+typedef __clang_svint8x3_t svint8x3_t;
+typedef __clang_svint16x3_t svint16x3_t;
+typedef __clang_svint32x3_t svint32x3_t;
+typedef __clang_svint64x3_t svint64x3_t;
+typedef __clang_svuint8x3_t svuint8x3_t;
+typedef __clang_svuint16x3_t svuint16x3_t;
+typedef __clang_svuint32x3_t svuint32x3_t;
+typedef __clang_svuint64x3_t svuint64x3_t;
+typedef __clang_svfloat16x3_t svfloat16x3_t;
+typedef __clang_svfloat32x3_t svfloat32x3_t;
+typedef __clang_svfloat64x3_t svfloat64x3_t;
+typedef __clang_svint8x4_t svint8x4_t;
+typedef __clang_svint16x4_t svint16x4_t;
+typedef __clang_svint32x4_t svint32x4_t;
+typedef __clang_svint64x4_t svint64x4_t;
+typedef __clang_svuint8x4_t svuint8x4_t;
+typedef __clang_svuint16x4_t svuint16x4_t;
+typedef __clang_svuint32x4_t svuint32x4_t;
+typedef __clang_svuint64x4_t svuint64x4_t;
+typedef __clang_svfloat16x4_t svfloat16x4_t;
+typedef __clang_svfloat32x4_t svfloat32x4_t;
+typedef __clang_svfloat64x4_t svfloat64x4_t;
+typedef __SVBool_t  svbool_t;
+
+#ifdef __ARM_FEATURE_SVE_BF16
+typedef __clang_svbfloat16x2_t svbfloat16x2_t;
+typedef __clang_svbfloat16x3_t svbfloat16x3_t;
+typedef __clang_svbfloat16x4_t svbfloat16x4_t;
+#endif
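The sv*_t typedefs above are sizeless builtin vector types; the svFOOxN_t forms are tuples of 2-4 vectors used by multi-vector loads/stores. A sketch of the tuple types, assuming the svcreate2_s32/svget2_s32 intrinsics that appear later in this header:

    #include <arm_sve.h>

    /* Pack two vectors into an svint32x2_t tuple and read one back out. */
    svint32_t second_of_pair(svint32_t a, svint32_t b) {
      svint32x2_t pair = svcreate2_s32(a, b);
      return svget2_s32(pair, 1);   /* tuple index must be a constant */
    }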
+typedef enum
+{
+  SV_POW2 = 0,
+  SV_VL1 = 1,
+  SV_VL2 = 2,
+  SV_VL3 = 3,
+  SV_VL4 = 4,
+  SV_VL5 = 5,
+  SV_VL6 = 6,
+  SV_VL7 = 7,
+  SV_VL8 = 8,
+  SV_VL16 = 9,
+  SV_VL32 = 10,
+  SV_VL64 = 11,
+  SV_VL128 = 12,
+  SV_VL256 = 13,
+  SV_MUL4 = 29,
+  SV_MUL3 = 30,
+  SV_ALL = 31
+} sv_pattern;
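sv_pattern selects a constrained vector length for the predicate-generating and element-count intrinsics; the gaps in the enumerator values (14-28) match the architectural encoding. A sketch, assuming svcntw_pat and svptrue_pat_b32 from later in this header:

    #include <arm_sve.h>

    /* Count of 32-bit lanes limited to the largest power of two that
       fits in the runtime vector length. */
    uint64_t pow2_word_count(void) {
      return svcntw_pat(SV_POW2);
    }

    /* Predicate with at most the first 4 word lanes active. */
    svbool_t first_four_words(void) {
      return svptrue_pat_b32(SV_VL4);
    }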
+
+typedef enum
+{
+  SV_PLDL1KEEP = 0,
+  SV_PLDL1STRM = 1,
+  SV_PLDL2KEEP = 2,
+  SV_PLDL2STRM = 3,
+  SV_PLDL3KEEP = 4,
+  SV_PLDL3STRM = 5,
+  SV_PSTL1KEEP = 8,
+  SV_PSTL1STRM = 9,
+  SV_PSTL2KEEP = 10,
+  SV_PSTL2STRM = 11,
+  SV_PSTL3KEEP = 12,
+  SV_PSTL3STRM = 13
+} sv_prfop;
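The sv_prfop values mirror PRFM-style hints: PLD/PST pick load vs store, L1-L3 pick the target cache level, and KEEP/STRM pick temporal vs streaming use. The operation must be a literal enumerator. A sketch, assuming the svprfb intrinsic defined later in this header:

    #include <arm_sve.h>

    /* Hint that the bytes under pg at base will be loaded soon,
       keeping them resident in L1. */
    void warm_l1(svbool_t pg, const void *base) {
      svprfb(pg, base, SV_PLDL1KEEP);
    }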
+
+/* Function attributes */
+#define __aio static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
+
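__aio bundles always_inline, nodebug, and clang's overloadable extension; the last is what lets a C header expose a single svreinterpret_s8(...) name across all the element types below. A minimal sketch of the same pattern (lane_bits is a hypothetical helper, not from this header):

    #include <arm_sve.h>

    /* Two C overloads of one name via clang's overloadable attribute. */
    static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
    int lane_bits(svint8_t v)  { (void)v; return 8; }
    static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
    int lane_bits(svint16_t v) { (void)v; return 16; }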
+#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__)
+#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__)
+#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__)
+#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__)
+#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__)
+#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__)
+#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__)
+#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__)
+#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__)
+#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__)
+#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__)
+#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__)
+#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__)
+#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__)
+#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__)
+#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__)
+#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__)
+#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__)
+#define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__)
+#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__)
+#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__)
+#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__)
+#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__)
+#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__)
+#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__)
+#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__)
+#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__)
+#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__)
+#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__)
+#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__)
+#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__)
+#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__)
+#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__)
+#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__)
+#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__)
+#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__)
+#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__)
+#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__)
+#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__)
+#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__)
+#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__)
+#define svreinterpret_u8_s16(...) __builtin_sve_reinterpret_u8_s16(__VA_ARGS__)
+#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__)
+#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__)
+#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__)
+#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__)
+#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__)
+#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__)
+#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u8_f32(...) __builtin_sve_reinterpret_u8_f32(__VA_ARGS__)
+#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__)
+#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__)
+#define svreinterpret_u16_s16(...) __builtin_sve_reinterpret_u16_s16(__VA_ARGS__)
+#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__)
+#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__)
+#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__)
+#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__)
+#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__)
+#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__)
+#define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__)
+#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__)
+#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__)
+#define svreinterpret_u32_s16(...) __builtin_sve_reinterpret_u32_s16(__VA_ARGS__)
+#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__)
+#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__)
+#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__)
+#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__)
+#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__)
+#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__)
+#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__)
+#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__)
+#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__)
+#define svreinterpret_u64_s16(...) __builtin_sve_reinterpret_u64_s16(__VA_ARGS__)
+#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__)
+#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__)
+#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__)
+#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__)
+#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__)
+#define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__)
+#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__)
+#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__)
+#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__)
+#define svreinterpret_f16_s16(...) __builtin_sve_reinterpret_f16_s16(__VA_ARGS__)
+#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__)
+#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__)
+#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__)
+#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__)
+#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__)
+#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__)
+#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__)
+#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__)
+#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__)
+#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__)
+#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__)
+#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__)
+#define svreinterpret_f32_u16(...) __builtin_sve_reinterpret_f32_u16(__VA_ARGS__)
+#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__)
+#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__)
+#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__)
+#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__)
+#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__)
+#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__)
+#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__)
+#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__)
+#define svreinterpret_f64_u8(...) __builtin_sve_reinterpret_f64_u8(__VA_ARGS__)
+#define svreinterpret_f64_u16(...) __builtin_sve_reinterpret_f64_u16(__VA_ARGS__)
+#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__)
+#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__)
+#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__)
+#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__)
+__aio svint8_t svreinterpret_s8(svint8_t op) {
+  return __builtin_sve_reinterpret_s8_s8(op);
+}
+
+__aio svint8_t svreinterpret_s8(svint16_t op) {
+  return __builtin_sve_reinterpret_s8_s16(op);
+}
+
+__aio svint8_t svreinterpret_s8(svint32_t op) {
+  return __builtin_sve_reinterpret_s8_s32(op);
+}
+
+__aio svint8_t svreinterpret_s8(svint64_t op) {
+  return __builtin_sve_reinterpret_s8_s64(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint8_t op) {
+  return __builtin_sve_reinterpret_s8_u8(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint16_t op) {
+  return __builtin_sve_reinterpret_s8_u16(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint32_t op) {
+  return __builtin_sve_reinterpret_s8_u32(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint64_t op) {
+  return __builtin_sve_reinterpret_s8_u64(op);
+}
+
+__aio svint8_t svreinterpret_s8(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s8_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint8_t svreinterpret_s8(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s8_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint8_t svreinterpret_s8(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s8_f32(op);
+}
+
+__aio svint8_t svreinterpret_s8(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s8_f64(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint8_t op) {
+  return __builtin_sve_reinterpret_s16_s8(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint16_t op) {
+  return __builtin_sve_reinterpret_s16_s16(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint32_t op) {
+  return __builtin_sve_reinterpret_s16_s32(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint64_t op) {
+  return __builtin_sve_reinterpret_s16_s64(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint8_t op) {
+  return __builtin_sve_reinterpret_s16_u8(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint16_t op) {
+  return __builtin_sve_reinterpret_s16_u16(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint32_t op) {
+  return __builtin_sve_reinterpret_s16_u32(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint64_t op) {
+  return __builtin_sve_reinterpret_s16_u64(op);
+}
+
+__aio svint16_t svreinterpret_s16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s16_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint16_t svreinterpret_s16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint16_t svreinterpret_s16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s16_f32(op);
+}
+
+__aio svint16_t svreinterpret_s16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s16_f64(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint8_t op) {
+  return __builtin_sve_reinterpret_s32_s8(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint16_t op) {
+  return __builtin_sve_reinterpret_s32_s16(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint32_t op) {
+  return __builtin_sve_reinterpret_s32_s32(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint64_t op) {
+  return __builtin_sve_reinterpret_s32_s64(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint8_t op) {
+  return __builtin_sve_reinterpret_s32_u8(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint16_t op) {
+  return __builtin_sve_reinterpret_s32_u16(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint32_t op) {
+  return __builtin_sve_reinterpret_s32_u32(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint64_t op) {
+  return __builtin_sve_reinterpret_s32_u64(op);
+}
+
+__aio svint32_t svreinterpret_s32(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s32_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint32_t svreinterpret_s32(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s32_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint32_t svreinterpret_s32(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s32_f32(op);
+}
+
+__aio svint32_t svreinterpret_s32(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s32_f64(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint8_t op) {
+  return __builtin_sve_reinterpret_s64_s8(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint16_t op) {
+  return __builtin_sve_reinterpret_s64_s16(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint32_t op) {
+  return __builtin_sve_reinterpret_s64_s32(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint64_t op) {
+  return __builtin_sve_reinterpret_s64_s64(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint8_t op) {
+  return __builtin_sve_reinterpret_s64_u8(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint16_t op) {
+  return __builtin_sve_reinterpret_s64_u16(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint32_t op) {
+  return __builtin_sve_reinterpret_s64_u32(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint64_t op) {
+  return __builtin_sve_reinterpret_s64_u64(op);
+}
+
+__aio svint64_t svreinterpret_s64(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s64_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint64_t svreinterpret_s64(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s64_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint64_t svreinterpret_s64(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s64_f32(op);
+}
+
+__aio svint64_t svreinterpret_s64(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s64_f64(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint8_t op) {
+  return __builtin_sve_reinterpret_u8_s8(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint16_t op) {
+  return __builtin_sve_reinterpret_u8_s16(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint32_t op) {
+  return __builtin_sve_reinterpret_u8_s32(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint64_t op) {
+  return __builtin_sve_reinterpret_u8_s64(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint8_t op) {
+  return __builtin_sve_reinterpret_u8_u8(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint16_t op) {
+  return __builtin_sve_reinterpret_u8_u16(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint32_t op) {
+  return __builtin_sve_reinterpret_u8_u32(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint64_t op) {
+  return __builtin_sve_reinterpret_u8_u64(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u8_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint8_t svreinterpret_u8(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u8_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint8_t svreinterpret_u8(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u8_f32(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u8_f64(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint8_t op) {
+  return __builtin_sve_reinterpret_u16_s8(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint16_t op) {
+  return __builtin_sve_reinterpret_u16_s16(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint32_t op) {
+  return __builtin_sve_reinterpret_u16_s32(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint64_t op) {
+  return __builtin_sve_reinterpret_u16_s64(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint8_t op) {
+  return __builtin_sve_reinterpret_u16_u8(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint16_t op) {
+  return __builtin_sve_reinterpret_u16_u16(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint32_t op) {
+  return __builtin_sve_reinterpret_u16_u32(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint64_t op) {
+  return __builtin_sve_reinterpret_u16_u64(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u16_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint16_t svreinterpret_u16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint16_t svreinterpret_u16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u16_f32(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u16_f64(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint8_t op) {
+  return __builtin_sve_reinterpret_u32_s8(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint16_t op) {
+  return __builtin_sve_reinterpret_u32_s16(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint32_t op) {
+  return __builtin_sve_reinterpret_u32_s32(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint64_t op) {
+  return __builtin_sve_reinterpret_u32_s64(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint8_t op) {
+  return __builtin_sve_reinterpret_u32_u8(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint16_t op) {
+  return __builtin_sve_reinterpret_u32_u16(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint32_t op) {
+  return __builtin_sve_reinterpret_u32_u32(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint64_t op) {
+  return __builtin_sve_reinterpret_u32_u64(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u32_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint32_t svreinterpret_u32(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u32_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint32_t svreinterpret_u32(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u32_f32(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u32_f64(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint8_t op) {
+  return __builtin_sve_reinterpret_u64_s8(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint16_t op) {
+  return __builtin_sve_reinterpret_u64_s16(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint32_t op) {
+  return __builtin_sve_reinterpret_u64_s32(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint64_t op) {
+  return __builtin_sve_reinterpret_u64_s64(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint8_t op) {
+  return __builtin_sve_reinterpret_u64_u8(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint16_t op) {
+  return __builtin_sve_reinterpret_u64_u16(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint32_t op) {
+  return __builtin_sve_reinterpret_u64_u32(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint64_t op) {
+  return __builtin_sve_reinterpret_u64_u64(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u64_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint64_t svreinterpret_u64(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u64_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint64_t svreinterpret_u64(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u64_f32(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u64_f64(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint8_t op) {
+  return __builtin_sve_reinterpret_f16_s8(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint16_t op) {
+  return __builtin_sve_reinterpret_f16_s16(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint32_t op) {
+  return __builtin_sve_reinterpret_f16_s32(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint64_t op) {
+  return __builtin_sve_reinterpret_f16_s64(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint8_t op) {
+  return __builtin_sve_reinterpret_f16_u8(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint16_t op) {
+  return __builtin_sve_reinterpret_f16_u16(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint32_t op) {
+  return __builtin_sve_reinterpret_f16_u32(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint64_t op) {
+  return __builtin_sve_reinterpret_f16_u64(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_f16_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svfloat16_t svreinterpret_f16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_f16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat16_t svreinterpret_f16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_f16_f32(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_f16_f64(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svint8_t op) {
+  return __builtin_sve_reinterpret_bf16_s8(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svint16_t op) {
+  return __builtin_sve_reinterpret_bf16_s16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svint32_t op) {
+  return __builtin_sve_reinterpret_bf16_s32(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svint64_t op) {
+  return __builtin_sve_reinterpret_bf16_s64(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svuint8_t op) {
+  return __builtin_sve_reinterpret_bf16_u8(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svuint16_t op) {
+  return __builtin_sve_reinterpret_bf16_u16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svuint32_t op) {
+  return __builtin_sve_reinterpret_bf16_u32(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svuint64_t op) {
+  return __builtin_sve_reinterpret_bf16_u64(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_bf16_f16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_bf16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_bf16_f32(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_bf16_f64(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat32_t svreinterpret_f32(svint8_t op) {
+  return __builtin_sve_reinterpret_f32_s8(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svint16_t op) {
+  return __builtin_sve_reinterpret_f32_s16(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svint32_t op) {
+  return __builtin_sve_reinterpret_f32_s32(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svint64_t op) {
+  return __builtin_sve_reinterpret_f32_s64(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint8_t op) {
+  return __builtin_sve_reinterpret_f32_u8(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint16_t op) {
+  return __builtin_sve_reinterpret_f32_u16(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint32_t op) {
+  return __builtin_sve_reinterpret_f32_u32(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint64_t op) {
+  return __builtin_sve_reinterpret_f32_u64(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svfloat16_t op) {
+  return __builtin_sve_reinterpret_f32_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svfloat32_t svreinterpret_f32(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_f32_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat32_t svreinterpret_f32(svfloat32_t op) {
+  return __builtin_sve_reinterpret_f32_f32(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svfloat64_t op) {
+  return __builtin_sve_reinterpret_f32_f64(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint8_t op) {
+  return __builtin_sve_reinterpret_f64_s8(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint16_t op) {
+  return __builtin_sve_reinterpret_f64_s16(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint32_t op) {
+  return __builtin_sve_reinterpret_f64_s32(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint64_t op) {
+  return __builtin_sve_reinterpret_f64_s64(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint8_t op) {
+  return __builtin_sve_reinterpret_f64_u8(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint16_t op) {
+  return __builtin_sve_reinterpret_f64_u16(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint32_t op) {
+  return __builtin_sve_reinterpret_f64_u32(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint64_t op) {
+  return __builtin_sve_reinterpret_f64_u64(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svfloat16_t op) {
+  return __builtin_sve_reinterpret_f64_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svfloat64_t svreinterpret_f64(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_f64_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat64_t svreinterpret_f64(svfloat32_t op) {
+  return __builtin_sve_reinterpret_f64_f32(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svfloat64_t op) {
+  return __builtin_sve_reinterpret_f64_f64(op);
+}
+
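Every svreinterpret_* above is a zero-cost bitcast: the register bits are unchanged and only the element type differs, with the overloaded __aio forms dispatching on the argument type. A sketch, assuming svptrue_b32 from elsewhere in this header:

    #include <arm_sve.h>

    /* Clear the sign bits of an f32 vector by reinterpreting to u32,
       masking, and reinterpreting back. */
    svfloat32_t fabs_bits(svfloat32_t v) {
      svuint32_t bits = svreinterpret_u32(v);
      bits = svand_n_u32_x(svptrue_b32(), bits, 0x7fffffffu);
      return svreinterpret_f32(bits);
    }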
+#define svabd_n_f64_m(...) __builtin_sve_svabd_n_f64_m(__VA_ARGS__)
+#define svabd_n_f32_m(...) __builtin_sve_svabd_n_f32_m(__VA_ARGS__)
+#define svabd_n_f16_m(...) __builtin_sve_svabd_n_f16_m(__VA_ARGS__)
+#define svabd_n_f64_x(...) __builtin_sve_svabd_n_f64_x(__VA_ARGS__)
+#define svabd_n_f32_x(...) __builtin_sve_svabd_n_f32_x(__VA_ARGS__)
+#define svabd_n_f16_x(...) __builtin_sve_svabd_n_f16_x(__VA_ARGS__)
+#define svabd_n_f64_z(...) __builtin_sve_svabd_n_f64_z(__VA_ARGS__)
+#define svabd_n_f32_z(...) __builtin_sve_svabd_n_f32_z(__VA_ARGS__)
+#define svabd_n_f16_z(...) __builtin_sve_svabd_n_f16_z(__VA_ARGS__)
+#define svabd_n_s8_m(...) __builtin_sve_svabd_n_s8_m(__VA_ARGS__)
+#define svabd_n_s32_m(...) __builtin_sve_svabd_n_s32_m(__VA_ARGS__)
+#define svabd_n_s64_m(...) __builtin_sve_svabd_n_s64_m(__VA_ARGS__)
+#define svabd_n_s16_m(...) __builtin_sve_svabd_n_s16_m(__VA_ARGS__)
+#define svabd_n_s8_x(...) __builtin_sve_svabd_n_s8_x(__VA_ARGS__)
+#define svabd_n_s32_x(...) __builtin_sve_svabd_n_s32_x(__VA_ARGS__)
+#define svabd_n_s64_x(...) __builtin_sve_svabd_n_s64_x(__VA_ARGS__)
+#define svabd_n_s16_x(...) __builtin_sve_svabd_n_s16_x(__VA_ARGS__)
+#define svabd_n_s8_z(...) __builtin_sve_svabd_n_s8_z(__VA_ARGS__)
+#define svabd_n_s32_z(...) __builtin_sve_svabd_n_s32_z(__VA_ARGS__)
+#define svabd_n_s64_z(...) __builtin_sve_svabd_n_s64_z(__VA_ARGS__)
+#define svabd_n_s16_z(...) __builtin_sve_svabd_n_s16_z(__VA_ARGS__)
+#define svabd_n_u8_m(...) __builtin_sve_svabd_n_u8_m(__VA_ARGS__)
+#define svabd_n_u32_m(...) __builtin_sve_svabd_n_u32_m(__VA_ARGS__)
+#define svabd_n_u64_m(...) __builtin_sve_svabd_n_u64_m(__VA_ARGS__)
+#define svabd_n_u16_m(...) __builtin_sve_svabd_n_u16_m(__VA_ARGS__)
+#define svabd_n_u8_x(...) __builtin_sve_svabd_n_u8_x(__VA_ARGS__)
+#define svabd_n_u32_x(...) __builtin_sve_svabd_n_u32_x(__VA_ARGS__)
+#define svabd_n_u64_x(...) __builtin_sve_svabd_n_u64_x(__VA_ARGS__)
+#define svabd_n_u16_x(...) __builtin_sve_svabd_n_u16_x(__VA_ARGS__)
+#define svabd_n_u8_z(...) __builtin_sve_svabd_n_u8_z(__VA_ARGS__)
+#define svabd_n_u32_z(...) __builtin_sve_svabd_n_u32_z(__VA_ARGS__)
+#define svabd_n_u64_z(...) __builtin_sve_svabd_n_u64_z(__VA_ARGS__)
+#define svabd_n_u16_z(...) __builtin_sve_svabd_n_u16_z(__VA_ARGS__)
+#define svabd_f64_m(...) __builtin_sve_svabd_f64_m(__VA_ARGS__)
+#define svabd_f32_m(...) __builtin_sve_svabd_f32_m(__VA_ARGS__)
+#define svabd_f16_m(...) __builtin_sve_svabd_f16_m(__VA_ARGS__)
+#define svabd_f64_x(...) __builtin_sve_svabd_f64_x(__VA_ARGS__)
+#define svabd_f32_x(...) __builtin_sve_svabd_f32_x(__VA_ARGS__)
+#define svabd_f16_x(...) __builtin_sve_svabd_f16_x(__VA_ARGS__)
+#define svabd_f64_z(...) __builtin_sve_svabd_f64_z(__VA_ARGS__)
+#define svabd_f32_z(...) __builtin_sve_svabd_f32_z(__VA_ARGS__)
+#define svabd_f16_z(...) __builtin_sve_svabd_f16_z(__VA_ARGS__)
+#define svabd_s8_m(...) __builtin_sve_svabd_s8_m(__VA_ARGS__)
+#define svabd_s32_m(...) __builtin_sve_svabd_s32_m(__VA_ARGS__)
+#define svabd_s64_m(...) __builtin_sve_svabd_s64_m(__VA_ARGS__)
+#define svabd_s16_m(...) __builtin_sve_svabd_s16_m(__VA_ARGS__)
+#define svabd_s8_x(...) __builtin_sve_svabd_s8_x(__VA_ARGS__)
+#define svabd_s32_x(...) __builtin_sve_svabd_s32_x(__VA_ARGS__)
+#define svabd_s64_x(...) __builtin_sve_svabd_s64_x(__VA_ARGS__)
+#define svabd_s16_x(...) __builtin_sve_svabd_s16_x(__VA_ARGS__)
+#define svabd_s8_z(...) __builtin_sve_svabd_s8_z(__VA_ARGS__)
+#define svabd_s32_z(...) __builtin_sve_svabd_s32_z(__VA_ARGS__)
+#define svabd_s64_z(...) __builtin_sve_svabd_s64_z(__VA_ARGS__)
+#define svabd_s16_z(...) __builtin_sve_svabd_s16_z(__VA_ARGS__)
+#define svabd_u8_m(...) __builtin_sve_svabd_u8_m(__VA_ARGS__)
+#define svabd_u32_m(...) __builtin_sve_svabd_u32_m(__VA_ARGS__)
+#define svabd_u64_m(...) __builtin_sve_svabd_u64_m(__VA_ARGS__)
+#define svabd_u16_m(...) __builtin_sve_svabd_u16_m(__VA_ARGS__)
+#define svabd_u8_x(...) __builtin_sve_svabd_u8_x(__VA_ARGS__)
+#define svabd_u32_x(...) __builtin_sve_svabd_u32_x(__VA_ARGS__)
+#define svabd_u64_x(...) __builtin_sve_svabd_u64_x(__VA_ARGS__)
+#define svabd_u16_x(...) __builtin_sve_svabd_u16_x(__VA_ARGS__)
+#define svabd_u8_z(...) __builtin_sve_svabd_u8_z(__VA_ARGS__)
+#define svabd_u32_z(...) __builtin_sve_svabd_u32_z(__VA_ARGS__)
+#define svabd_u64_z(...) __builtin_sve_svabd_u64_z(__VA_ARGS__)
+#define svabd_u16_z(...) __builtin_sve_svabd_u16_z(__VA_ARGS__)
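The _m/_x/_z suffixes on svabd (and on the rest of the predicated intrinsics below) are the ACLE predication forms: _m (merging) keeps inactive lanes from the first vector operand, _z (zeroing) clears them, and _x ("don't care") leaves them unspecified so the compiler can pick the cheapest encoding. The _n_ infix takes a scalar second operand that is broadcast to all lanes. A sketch of all four:

    #include <arm_sve.h>

    /* The predication forms of svabd (absolute difference). */
    void abd_forms(svbool_t pg, svfloat32_t a, svfloat32_t b) {
      svfloat32_t m = svabd_f32_m(pg, a, b);      /* inactive lanes keep a */
      svfloat32_t z = svabd_f32_z(pg, a, b);      /* inactive lanes are 0.0f */
      svfloat32_t x = svabd_f32_x(pg, a, b);      /* inactive lanes unspecified */
      svfloat32_t n = svabd_n_f32_x(pg, a, 2.0f); /* scalar operand broadcast */
      (void)m; (void)z; (void)x; (void)n;
    }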
+#define svabs_f64_m(...) __builtin_sve_svabs_f64_m(__VA_ARGS__)
+#define svabs_f32_m(...) __builtin_sve_svabs_f32_m(__VA_ARGS__)
+#define svabs_f16_m(...) __builtin_sve_svabs_f16_m(__VA_ARGS__)
+#define svabs_f64_x(...) __builtin_sve_svabs_f64_x(__VA_ARGS__)
+#define svabs_f32_x(...) __builtin_sve_svabs_f32_x(__VA_ARGS__)
+#define svabs_f16_x(...) __builtin_sve_svabs_f16_x(__VA_ARGS__)
+#define svabs_f64_z(...) __builtin_sve_svabs_f64_z(__VA_ARGS__)
+#define svabs_f32_z(...) __builtin_sve_svabs_f32_z(__VA_ARGS__)
+#define svabs_f16_z(...) __builtin_sve_svabs_f16_z(__VA_ARGS__)
+#define svabs_s8_m(...) __builtin_sve_svabs_s8_m(__VA_ARGS__)
+#define svabs_s32_m(...) __builtin_sve_svabs_s32_m(__VA_ARGS__)
+#define svabs_s64_m(...) __builtin_sve_svabs_s64_m(__VA_ARGS__)
+#define svabs_s16_m(...) __builtin_sve_svabs_s16_m(__VA_ARGS__)
+#define svabs_s8_x(...) __builtin_sve_svabs_s8_x(__VA_ARGS__)
+#define svabs_s32_x(...) __builtin_sve_svabs_s32_x(__VA_ARGS__)
+#define svabs_s64_x(...) __builtin_sve_svabs_s64_x(__VA_ARGS__)
+#define svabs_s16_x(...) __builtin_sve_svabs_s16_x(__VA_ARGS__)
+#define svabs_s8_z(...) __builtin_sve_svabs_s8_z(__VA_ARGS__)
+#define svabs_s32_z(...) __builtin_sve_svabs_s32_z(__VA_ARGS__)
+#define svabs_s64_z(...) __builtin_sve_svabs_s64_z(__VA_ARGS__)
+#define svabs_s16_z(...) __builtin_sve_svabs_s16_z(__VA_ARGS__)
+#define svacge_n_f64(...) __builtin_sve_svacge_n_f64(__VA_ARGS__)
+#define svacge_n_f32(...) __builtin_sve_svacge_n_f32(__VA_ARGS__)
+#define svacge_n_f16(...) __builtin_sve_svacge_n_f16(__VA_ARGS__)
+#define svacge_f64(...) __builtin_sve_svacge_f64(__VA_ARGS__)
+#define svacge_f32(...) __builtin_sve_svacge_f32(__VA_ARGS__)
+#define svacge_f16(...) __builtin_sve_svacge_f16(__VA_ARGS__)
+#define svacgt_n_f64(...) __builtin_sve_svacgt_n_f64(__VA_ARGS__)
+#define svacgt_n_f32(...) __builtin_sve_svacgt_n_f32(__VA_ARGS__)
+#define svacgt_n_f16(...) __builtin_sve_svacgt_n_f16(__VA_ARGS__)
+#define svacgt_f64(...) __builtin_sve_svacgt_f64(__VA_ARGS__)
+#define svacgt_f32(...) __builtin_sve_svacgt_f32(__VA_ARGS__)
+#define svacgt_f16(...) __builtin_sve_svacgt_f16(__VA_ARGS__)
+#define svacle_n_f64(...) __builtin_sve_svacle_n_f64(__VA_ARGS__)
+#define svacle_n_f32(...) __builtin_sve_svacle_n_f32(__VA_ARGS__)
+#define svacle_n_f16(...) __builtin_sve_svacle_n_f16(__VA_ARGS__)
+#define svacle_f64(...) __builtin_sve_svacle_f64(__VA_ARGS__)
+#define svacle_f32(...) __builtin_sve_svacle_f32(__VA_ARGS__)
+#define svacle_f16(...) __builtin_sve_svacle_f16(__VA_ARGS__)
+#define svaclt_n_f64(...) __builtin_sve_svaclt_n_f64(__VA_ARGS__)
+#define svaclt_n_f32(...) __builtin_sve_svaclt_n_f32(__VA_ARGS__)
+#define svaclt_n_f16(...) __builtin_sve_svaclt_n_f16(__VA_ARGS__)
+#define svaclt_f64(...) __builtin_sve_svaclt_f64(__VA_ARGS__)
+#define svaclt_f32(...) __builtin_sve_svaclt_f32(__VA_ARGS__)
+#define svaclt_f16(...) __builtin_sve_svaclt_f16(__VA_ARGS__)
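svacge/svacgt/svacle/svaclt compare absolute values, |op1| against |op2|, and return a predicate (FACGE/FACGT underneath; the le/lt forms swap operands). A sketch:

    #include <arm_sve.h>

    /* Predicate of active lanes where |x| exceeds a threshold. */
    svbool_t magnitude_over(svbool_t pg, svfloat32_t x, float32_t limit) {
      return svacgt_n_f32(pg, x, limit);   /* |x| > |limit| per lane */
    }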
+#define svadd_n_f64_m(...) __builtin_sve_svadd_n_f64_m(__VA_ARGS__)
+#define svadd_n_f32_m(...) __builtin_sve_svadd_n_f32_m(__VA_ARGS__)
+#define svadd_n_f16_m(...) __builtin_sve_svadd_n_f16_m(__VA_ARGS__)
+#define svadd_n_f64_x(...) __builtin_sve_svadd_n_f64_x(__VA_ARGS__)
+#define svadd_n_f32_x(...) __builtin_sve_svadd_n_f32_x(__VA_ARGS__)
+#define svadd_n_f16_x(...) __builtin_sve_svadd_n_f16_x(__VA_ARGS__)
+#define svadd_n_f64_z(...) __builtin_sve_svadd_n_f64_z(__VA_ARGS__)
+#define svadd_n_f32_z(...) __builtin_sve_svadd_n_f32_z(__VA_ARGS__)
+#define svadd_n_f16_z(...) __builtin_sve_svadd_n_f16_z(__VA_ARGS__)
+#define svadd_n_u8_m(...) __builtin_sve_svadd_n_u8_m(__VA_ARGS__)
+#define svadd_n_u32_m(...) __builtin_sve_svadd_n_u32_m(__VA_ARGS__)
+#define svadd_n_u64_m(...) __builtin_sve_svadd_n_u64_m(__VA_ARGS__)
+#define svadd_n_u16_m(...) __builtin_sve_svadd_n_u16_m(__VA_ARGS__)
+#define svadd_n_s8_m(...) __builtin_sve_svadd_n_s8_m(__VA_ARGS__)
+#define svadd_n_s32_m(...) __builtin_sve_svadd_n_s32_m(__VA_ARGS__)
+#define svadd_n_s64_m(...) __builtin_sve_svadd_n_s64_m(__VA_ARGS__)
+#define svadd_n_s16_m(...) __builtin_sve_svadd_n_s16_m(__VA_ARGS__)
+#define svadd_n_u8_x(...) __builtin_sve_svadd_n_u8_x(__VA_ARGS__)
+#define svadd_n_u32_x(...) __builtin_sve_svadd_n_u32_x(__VA_ARGS__)
+#define svadd_n_u64_x(...) __builtin_sve_svadd_n_u64_x(__VA_ARGS__)
+#define svadd_n_u16_x(...) __builtin_sve_svadd_n_u16_x(__VA_ARGS__)
+#define svadd_n_s8_x(...) __builtin_sve_svadd_n_s8_x(__VA_ARGS__)
+#define svadd_n_s32_x(...) __builtin_sve_svadd_n_s32_x(__VA_ARGS__)
+#define svadd_n_s64_x(...) __builtin_sve_svadd_n_s64_x(__VA_ARGS__)
+#define svadd_n_s16_x(...) __builtin_sve_svadd_n_s16_x(__VA_ARGS__)
+#define svadd_n_u8_z(...) __builtin_sve_svadd_n_u8_z(__VA_ARGS__)
+#define svadd_n_u32_z(...) __builtin_sve_svadd_n_u32_z(__VA_ARGS__)
+#define svadd_n_u64_z(...) __builtin_sve_svadd_n_u64_z(__VA_ARGS__)
+#define svadd_n_u16_z(...) __builtin_sve_svadd_n_u16_z(__VA_ARGS__)
+#define svadd_n_s8_z(...) __builtin_sve_svadd_n_s8_z(__VA_ARGS__)
+#define svadd_n_s32_z(...) __builtin_sve_svadd_n_s32_z(__VA_ARGS__)
+#define svadd_n_s64_z(...) __builtin_sve_svadd_n_s64_z(__VA_ARGS__)
+#define svadd_n_s16_z(...) __builtin_sve_svadd_n_s16_z(__VA_ARGS__)
+#define svadd_f64_m(...) __builtin_sve_svadd_f64_m(__VA_ARGS__)
+#define svadd_f32_m(...) __builtin_sve_svadd_f32_m(__VA_ARGS__)
+#define svadd_f16_m(...) __builtin_sve_svadd_f16_m(__VA_ARGS__)
+#define svadd_f64_x(...) __builtin_sve_svadd_f64_x(__VA_ARGS__)
+#define svadd_f32_x(...) __builtin_sve_svadd_f32_x(__VA_ARGS__)
+#define svadd_f16_x(...) __builtin_sve_svadd_f16_x(__VA_ARGS__)
+#define svadd_f64_z(...) __builtin_sve_svadd_f64_z(__VA_ARGS__)
+#define svadd_f32_z(...) __builtin_sve_svadd_f32_z(__VA_ARGS__)
+#define svadd_f16_z(...) __builtin_sve_svadd_f16_z(__VA_ARGS__)
+#define svadd_u8_m(...) __builtin_sve_svadd_u8_m(__VA_ARGS__)
+#define svadd_u32_m(...) __builtin_sve_svadd_u32_m(__VA_ARGS__)
+#define svadd_u64_m(...) __builtin_sve_svadd_u64_m(__VA_ARGS__)
+#define svadd_u16_m(...) __builtin_sve_svadd_u16_m(__VA_ARGS__)
+#define svadd_s8_m(...) __builtin_sve_svadd_s8_m(__VA_ARGS__)
+#define svadd_s32_m(...) __builtin_sve_svadd_s32_m(__VA_ARGS__)
+#define svadd_s64_m(...) __builtin_sve_svadd_s64_m(__VA_ARGS__)
+#define svadd_s16_m(...) __builtin_sve_svadd_s16_m(__VA_ARGS__)
+#define svadd_u8_x(...) __builtin_sve_svadd_u8_x(__VA_ARGS__)
+#define svadd_u32_x(...) __builtin_sve_svadd_u32_x(__VA_ARGS__)
+#define svadd_u64_x(...) __builtin_sve_svadd_u64_x(__VA_ARGS__)
+#define svadd_u16_x(...) __builtin_sve_svadd_u16_x(__VA_ARGS__)
+#define svadd_s8_x(...) __builtin_sve_svadd_s8_x(__VA_ARGS__)
+#define svadd_s32_x(...) __builtin_sve_svadd_s32_x(__VA_ARGS__)
+#define svadd_s64_x(...) __builtin_sve_svadd_s64_x(__VA_ARGS__)
+#define svadd_s16_x(...) __builtin_sve_svadd_s16_x(__VA_ARGS__)
+#define svadd_u8_z(...) __builtin_sve_svadd_u8_z(__VA_ARGS__)
+#define svadd_u32_z(...) __builtin_sve_svadd_u32_z(__VA_ARGS__)
+#define svadd_u64_z(...) __builtin_sve_svadd_u64_z(__VA_ARGS__)
+#define svadd_u16_z(...) __builtin_sve_svadd_u16_z(__VA_ARGS__)
+#define svadd_s8_z(...) __builtin_sve_svadd_s8_z(__VA_ARGS__)
+#define svadd_s32_z(...) __builtin_sve_svadd_s32_z(__VA_ARGS__)
+#define svadd_s64_z(...) __builtin_sve_svadd_s64_z(__VA_ARGS__)
+#define svadd_s16_z(...) __builtin_sve_svadd_s16_z(__VA_ARGS__)
+#define svadda_f64(...) __builtin_sve_svadda_f64(__VA_ARGS__)
+#define svadda_f32(...) __builtin_sve_svadda_f32(__VA_ARGS__)
+#define svadda_f16(...) __builtin_sve_svadda_f16(__VA_ARGS__)
+#define svaddv_s8(...) __builtin_sve_svaddv_s8(__VA_ARGS__)
+#define svaddv_s32(...) __builtin_sve_svaddv_s32(__VA_ARGS__)
+#define svaddv_s64(...) __builtin_sve_svaddv_s64(__VA_ARGS__)
+#define svaddv_s16(...) __builtin_sve_svaddv_s16(__VA_ARGS__)
+#define svaddv_u8(...) __builtin_sve_svaddv_u8(__VA_ARGS__)
+#define svaddv_u32(...) __builtin_sve_svaddv_u32(__VA_ARGS__)
+#define svaddv_u64(...) __builtin_sve_svaddv_u64(__VA_ARGS__)
+#define svaddv_u16(...) __builtin_sve_svaddv_u16(__VA_ARGS__)
+#define svaddv_f64(...) __builtin_sve_svaddv_f64(__VA_ARGS__)
+#define svaddv_f32(...) __builtin_sve_svaddv_f32(__VA_ARGS__)
+#define svaddv_f16(...) __builtin_sve_svaddv_f16(__VA_ARGS__)
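svaddv reduces the active lanes to a scalar; for floats it is an unordered (tree) sum, while svadda above it is the strictly-ordered left-to-right sum seeded with an initial value, matching a scalar loop bit-for-bit. A sketch of both:

    #include <arm_sve.h>

    /* Unordered vs strictly-ordered float sums over active lanes. */
    float32_t fast_sum(svbool_t pg, svfloat32_t v) {
      return svaddv_f32(pg, v);          /* tree reduction */
    }
    float64_t exact_order_sum(svbool_t pg, float64_t seed, svfloat64_t v) {
      return svadda_f64(pg, seed, v);    /* left-to-right, starts at seed */
    }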
+#define svadrb_u32base_u32offset(...) __builtin_sve_svadrb_u32base_u32offset(__VA_ARGS__)
+#define svadrb_u64base_u64offset(...) __builtin_sve_svadrb_u64base_u64offset(__VA_ARGS__)
+#define svadrb_u32base_s32offset(...) __builtin_sve_svadrb_u32base_s32offset(__VA_ARGS__)
+#define svadrb_u64base_s64offset(...) __builtin_sve_svadrb_u64base_s64offset(__VA_ARGS__)
+#define svadrd_u32base_u32index(...) __builtin_sve_svadrd_u32base_u32index(__VA_ARGS__)
+#define svadrd_u64base_u64index(...) __builtin_sve_svadrd_u64base_u64index(__VA_ARGS__)
+#define svadrd_u32base_s32index(...) __builtin_sve_svadrd_u32base_s32index(__VA_ARGS__)
+#define svadrd_u64base_s64index(...) __builtin_sve_svadrd_u64base_s64index(__VA_ARGS__)
+#define svadrh_u32base_u32index(...) __builtin_sve_svadrh_u32base_u32index(__VA_ARGS__)
+#define svadrh_u64base_u64index(...) __builtin_sve_svadrh_u64base_u64index(__VA_ARGS__)
+#define svadrh_u32base_s32index(...) __builtin_sve_svadrh_u32base_s32index(__VA_ARGS__)
+#define svadrh_u64base_s64index(...) __builtin_sve_svadrh_u64base_s64index(__VA_ARGS__)
+#define svadrw_u32base_u32index(...) __builtin_sve_svadrw_u32base_u32index(__VA_ARGS__)
+#define svadrw_u64base_u64index(...) __builtin_sve_svadrw_u64base_u64index(__VA_ARGS__)
+#define svadrw_u32base_s32index(...) __builtin_sve_svadrw_u32base_s32index(__VA_ARGS__)
+#define svadrw_u64base_s64index(...) __builtin_sve_svadrw_u64base_s64index(__VA_ARGS__)
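The svadr{b,h,w,d} families compute per-lane gather/scatter addresses: a base vector plus an offset/index vector, with the index scaled by the element size the letter implies (b=1, h=2, w=4, d=8). A sketch:

    #include <arm_sve.h>

    /* Per-lane addresses bases[i] + idx[i] * 4 (word-scaled index). */
    svuint64_t word_addrs(svuint64_t bases, svuint64_t idx) {
      return svadrw_u64base_u64index(bases, idx);
    }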
+#define svand_b_z(...) __builtin_sve_svand_b_z(__VA_ARGS__)
+#define svand_n_u8_m(...) __builtin_sve_svand_n_u8_m(__VA_ARGS__)
+#define svand_n_u32_m(...) __builtin_sve_svand_n_u32_m(__VA_ARGS__)
+#define svand_n_u64_m(...) __builtin_sve_svand_n_u64_m(__VA_ARGS__)
+#define svand_n_u16_m(...) __builtin_sve_svand_n_u16_m(__VA_ARGS__)
+#define svand_n_s8_m(...) __builtin_sve_svand_n_s8_m(__VA_ARGS__)
+#define svand_n_s32_m(...) __builtin_sve_svand_n_s32_m(__VA_ARGS__)
+#define svand_n_s64_m(...) __builtin_sve_svand_n_s64_m(__VA_ARGS__)
+#define svand_n_s16_m(...) __builtin_sve_svand_n_s16_m(__VA_ARGS__)
+#define svand_n_u8_x(...) __builtin_sve_svand_n_u8_x(__VA_ARGS__)
+#define svand_n_u32_x(...) __builtin_sve_svand_n_u32_x(__VA_ARGS__)
+#define svand_n_u64_x(...) __builtin_sve_svand_n_u64_x(__VA_ARGS__)
+#define svand_n_u16_x(...) __builtin_sve_svand_n_u16_x(__VA_ARGS__)
+#define svand_n_s8_x(...) __builtin_sve_svand_n_s8_x(__VA_ARGS__)
+#define svand_n_s32_x(...) __builtin_sve_svand_n_s32_x(__VA_ARGS__)
+#define svand_n_s64_x(...) __builtin_sve_svand_n_s64_x(__VA_ARGS__)
+#define svand_n_s16_x(...) __builtin_sve_svand_n_s16_x(__VA_ARGS__)
+#define svand_n_u8_z(...) __builtin_sve_svand_n_u8_z(__VA_ARGS__)
+#define svand_n_u32_z(...) __builtin_sve_svand_n_u32_z(__VA_ARGS__)
+#define svand_n_u64_z(...) __builtin_sve_svand_n_u64_z(__VA_ARGS__)
+#define svand_n_u16_z(...) __builtin_sve_svand_n_u16_z(__VA_ARGS__)
+#define svand_n_s8_z(...) __builtin_sve_svand_n_s8_z(__VA_ARGS__)
+#define svand_n_s32_z(...) __builtin_sve_svand_n_s32_z(__VA_ARGS__)
+#define svand_n_s64_z(...) __builtin_sve_svand_n_s64_z(__VA_ARGS__)
+#define svand_n_s16_z(...) __builtin_sve_svand_n_s16_z(__VA_ARGS__)
+#define svand_u8_m(...) __builtin_sve_svand_u8_m(__VA_ARGS__)
+#define svand_u32_m(...) __builtin_sve_svand_u32_m(__VA_ARGS__)
+#define svand_u64_m(...) __builtin_sve_svand_u64_m(__VA_ARGS__)
+#define svand_u16_m(...) __builtin_sve_svand_u16_m(__VA_ARGS__)
+#define svand_s8_m(...) __builtin_sve_svand_s8_m(__VA_ARGS__)
+#define svand_s32_m(...) __builtin_sve_svand_s32_m(__VA_ARGS__)
+#define svand_s64_m(...) __builtin_sve_svand_s64_m(__VA_ARGS__)
+#define svand_s16_m(...) __builtin_sve_svand_s16_m(__VA_ARGS__)
+#define svand_u8_x(...) __builtin_sve_svand_u8_x(__VA_ARGS__)
+#define svand_u32_x(...) __builtin_sve_svand_u32_x(__VA_ARGS__)
+#define svand_u64_x(...) __builtin_sve_svand_u64_x(__VA_ARGS__)
+#define svand_u16_x(...) __builtin_sve_svand_u16_x(__VA_ARGS__)
+#define svand_s8_x(...) __builtin_sve_svand_s8_x(__VA_ARGS__)
+#define svand_s32_x(...) __builtin_sve_svand_s32_x(__VA_ARGS__)
+#define svand_s64_x(...) __builtin_sve_svand_s64_x(__VA_ARGS__)
+#define svand_s16_x(...) __builtin_sve_svand_s16_x(__VA_ARGS__)
+#define svand_u8_z(...) __builtin_sve_svand_u8_z(__VA_ARGS__)
+#define svand_u32_z(...) __builtin_sve_svand_u32_z(__VA_ARGS__)
+#define svand_u64_z(...) __builtin_sve_svand_u64_z(__VA_ARGS__)
+#define svand_u16_z(...) __builtin_sve_svand_u16_z(__VA_ARGS__)
+#define svand_s8_z(...) __builtin_sve_svand_s8_z(__VA_ARGS__)
+#define svand_s32_z(...) __builtin_sve_svand_s32_z(__VA_ARGS__)
+#define svand_s64_z(...) __builtin_sve_svand_s64_z(__VA_ARGS__)
+#define svand_s16_z(...) __builtin_sve_svand_s16_z(__VA_ARGS__)
+#define svandv_u8(...) __builtin_sve_svandv_u8(__VA_ARGS__)
+#define svandv_u32(...) __builtin_sve_svandv_u32(__VA_ARGS__)
+#define svandv_u64(...) __builtin_sve_svandv_u64(__VA_ARGS__)
+#define svandv_u16(...) __builtin_sve_svandv_u16(__VA_ARGS__)
+#define svandv_s8(...) __builtin_sve_svandv_s8(__VA_ARGS__)
+#define svandv_s32(...) __builtin_sve_svandv_s32(__VA_ARGS__)
+#define svandv_s64(...) __builtin_sve_svandv_s64(__VA_ARGS__)
+#define svandv_s16(...) __builtin_sve_svandv_s16(__VA_ARGS__)
+#define svasr_n_s8_m(...) __builtin_sve_svasr_n_s8_m(__VA_ARGS__)
+#define svasr_n_s32_m(...) __builtin_sve_svasr_n_s32_m(__VA_ARGS__)
+#define svasr_n_s64_m(...) __builtin_sve_svasr_n_s64_m(__VA_ARGS__)
+#define svasr_n_s16_m(...) __builtin_sve_svasr_n_s16_m(__VA_ARGS__)
+#define svasr_n_s8_x(...) __builtin_sve_svasr_n_s8_x(__VA_ARGS__)
+#define svasr_n_s32_x(...) __builtin_sve_svasr_n_s32_x(__VA_ARGS__)
+#define svasr_n_s64_x(...) __builtin_sve_svasr_n_s64_x(__VA_ARGS__)
+#define svasr_n_s16_x(...) __builtin_sve_svasr_n_s16_x(__VA_ARGS__)
+#define svasr_n_s8_z(...) __builtin_sve_svasr_n_s8_z(__VA_ARGS__)
+#define svasr_n_s32_z(...) __builtin_sve_svasr_n_s32_z(__VA_ARGS__)
+#define svasr_n_s64_z(...) __builtin_sve_svasr_n_s64_z(__VA_ARGS__)
+#define svasr_n_s16_z(...) __builtin_sve_svasr_n_s16_z(__VA_ARGS__)
+#define svasr_s8_m(...) __builtin_sve_svasr_s8_m(__VA_ARGS__)
+#define svasr_s32_m(...) __builtin_sve_svasr_s32_m(__VA_ARGS__)
+#define svasr_s64_m(...) __builtin_sve_svasr_s64_m(__VA_ARGS__)
+#define svasr_s16_m(...) __builtin_sve_svasr_s16_m(__VA_ARGS__)
+#define svasr_s8_x(...) __builtin_sve_svasr_s8_x(__VA_ARGS__)
+#define svasr_s32_x(...) __builtin_sve_svasr_s32_x(__VA_ARGS__)
+#define svasr_s64_x(...) __builtin_sve_svasr_s64_x(__VA_ARGS__)
+#define svasr_s16_x(...) __builtin_sve_svasr_s16_x(__VA_ARGS__)
+#define svasr_s8_z(...) __builtin_sve_svasr_s8_z(__VA_ARGS__)
+#define svasr_s32_z(...) __builtin_sve_svasr_s32_z(__VA_ARGS__)
+#define svasr_s64_z(...) __builtin_sve_svasr_s64_z(__VA_ARGS__)
+#define svasr_s16_z(...) __builtin_sve_svasr_s16_z(__VA_ARGS__)
+#define svasr_wide_n_s8_m(...) __builtin_sve_svasr_wide_n_s8_m(__VA_ARGS__)
+#define svasr_wide_n_s32_m(...) __builtin_sve_svasr_wide_n_s32_m(__VA_ARGS__)
+#define svasr_wide_n_s16_m(...) __builtin_sve_svasr_wide_n_s16_m(__VA_ARGS__)
+#define svasr_wide_n_s8_x(...) __builtin_sve_svasr_wide_n_s8_x(__VA_ARGS__)
+#define svasr_wide_n_s32_x(...) __builtin_sve_svasr_wide_n_s32_x(__VA_ARGS__)
+#define svasr_wide_n_s16_x(...) __builtin_sve_svasr_wide_n_s16_x(__VA_ARGS__)
+#define svasr_wide_n_s8_z(...) __builtin_sve_svasr_wide_n_s8_z(__VA_ARGS__)
+#define svasr_wide_n_s32_z(...) __builtin_sve_svasr_wide_n_s32_z(__VA_ARGS__)
+#define svasr_wide_n_s16_z(...) __builtin_sve_svasr_wide_n_s16_z(__VA_ARGS__)
+#define svasr_wide_s8_m(...) __builtin_sve_svasr_wide_s8_m(__VA_ARGS__)
+#define svasr_wide_s32_m(...) __builtin_sve_svasr_wide_s32_m(__VA_ARGS__)
+#define svasr_wide_s16_m(...) __builtin_sve_svasr_wide_s16_m(__VA_ARGS__)
+#define svasr_wide_s8_x(...) __builtin_sve_svasr_wide_s8_x(__VA_ARGS__)
+#define svasr_wide_s32_x(...) __builtin_sve_svasr_wide_s32_x(__VA_ARGS__)
+#define svasr_wide_s16_x(...) __builtin_sve_svasr_wide_s16_x(__VA_ARGS__)
+#define svasr_wide_s8_z(...) __builtin_sve_svasr_wide_s8_z(__VA_ARGS__)
+#define svasr_wide_s32_z(...) __builtin_sve_svasr_wide_s32_z(__VA_ARGS__)
+#define svasr_wide_s16_z(...) __builtin_sve_svasr_wide_s16_z(__VA_ARGS__)
+#define svasrd_n_s8_m(...) __builtin_sve_svasrd_n_s8_m(__VA_ARGS__)
+#define svasrd_n_s32_m(...) __builtin_sve_svasrd_n_s32_m(__VA_ARGS__)
+#define svasrd_n_s64_m(...) __builtin_sve_svasrd_n_s64_m(__VA_ARGS__)
+#define svasrd_n_s16_m(...) __builtin_sve_svasrd_n_s16_m(__VA_ARGS__)
+#define svasrd_n_s8_x(...) __builtin_sve_svasrd_n_s8_x(__VA_ARGS__)
+#define svasrd_n_s32_x(...) __builtin_sve_svasrd_n_s32_x(__VA_ARGS__)
+#define svasrd_n_s64_x(...) __builtin_sve_svasrd_n_s64_x(__VA_ARGS__)
+#define svasrd_n_s16_x(...) __builtin_sve_svasrd_n_s16_x(__VA_ARGS__)
+#define svasrd_n_s8_z(...) __builtin_sve_svasrd_n_s8_z(__VA_ARGS__)
+#define svasrd_n_s32_z(...) __builtin_sve_svasrd_n_s32_z(__VA_ARGS__)
+#define svasrd_n_s64_z(...) __builtin_sve_svasrd_n_s64_z(__VA_ARGS__)
+#define svasrd_n_s16_z(...) __builtin_sve_svasrd_n_s16_z(__VA_ARGS__)
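svasrd is an arithmetic shift right that rounds toward zero, i.e. signed division by a power of two that agrees with C's / on negative values (plain svasr rounds toward minus infinity). The shift amount is an immediate. A sketch:

    #include <arm_sve.h>

    /* x / 4 for signed lanes, rounding toward zero like C division. */
    svint32_t div_by_4(svbool_t pg, svint32_t x) {
      return svasrd_n_s32_x(pg, x, 2);   /* immediate shift: 1..32 */
    }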
+#define svbic_b_z(...) __builtin_sve_svbic_b_z(__VA_ARGS__)
+#define svbic_n_u8_m(...) __builtin_sve_svbic_n_u8_m(__VA_ARGS__)
+#define svbic_n_u32_m(...) __builtin_sve_svbic_n_u32_m(__VA_ARGS__)
+#define svbic_n_u64_m(...) __builtin_sve_svbic_n_u64_m(__VA_ARGS__)
+#define svbic_n_u16_m(...) __builtin_sve_svbic_n_u16_m(__VA_ARGS__)
+#define svbic_n_s8_m(...) __builtin_sve_svbic_n_s8_m(__VA_ARGS__)
+#define svbic_n_s32_m(...) __builtin_sve_svbic_n_s32_m(__VA_ARGS__)
+#define svbic_n_s64_m(...) __builtin_sve_svbic_n_s64_m(__VA_ARGS__)
+#define svbic_n_s16_m(...) __builtin_sve_svbic_n_s16_m(__VA_ARGS__)
+#define svbic_n_u8_x(...) __builtin_sve_svbic_n_u8_x(__VA_ARGS__)
+#define svbic_n_u32_x(...) __builtin_sve_svbic_n_u32_x(__VA_ARGS__)
+#define svbic_n_u64_x(...) __builtin_sve_svbic_n_u64_x(__VA_ARGS__)
+#define svbic_n_u16_x(...) __builtin_sve_svbic_n_u16_x(__VA_ARGS__)
+#define svbic_n_s8_x(...) __builtin_sve_svbic_n_s8_x(__VA_ARGS__)
+#define svbic_n_s32_x(...) __builtin_sve_svbic_n_s32_x(__VA_ARGS__)
+#define svbic_n_s64_x(...) __builtin_sve_svbic_n_s64_x(__VA_ARGS__)
+#define svbic_n_s16_x(...) __builtin_sve_svbic_n_s16_x(__VA_ARGS__)
+#define svbic_n_u8_z(...) __builtin_sve_svbic_n_u8_z(__VA_ARGS__)
+#define svbic_n_u32_z(...) __builtin_sve_svbic_n_u32_z(__VA_ARGS__)
+#define svbic_n_u64_z(...) __builtin_sve_svbic_n_u64_z(__VA_ARGS__)
+#define svbic_n_u16_z(...) __builtin_sve_svbic_n_u16_z(__VA_ARGS__)
+#define svbic_n_s8_z(...) __builtin_sve_svbic_n_s8_z(__VA_ARGS__)
+#define svbic_n_s32_z(...) __builtin_sve_svbic_n_s32_z(__VA_ARGS__)
+#define svbic_n_s64_z(...) __builtin_sve_svbic_n_s64_z(__VA_ARGS__)
+#define svbic_n_s16_z(...) __builtin_sve_svbic_n_s16_z(__VA_ARGS__)
+#define svbic_u8_m(...) __builtin_sve_svbic_u8_m(__VA_ARGS__)
+#define svbic_u32_m(...) __builtin_sve_svbic_u32_m(__VA_ARGS__)
+#define svbic_u64_m(...) __builtin_sve_svbic_u64_m(__VA_ARGS__)
+#define svbic_u16_m(...) __builtin_sve_svbic_u16_m(__VA_ARGS__)
+#define svbic_s8_m(...) __builtin_sve_svbic_s8_m(__VA_ARGS__)
+#define svbic_s32_m(...) __builtin_sve_svbic_s32_m(__VA_ARGS__)
+#define svbic_s64_m(...) __builtin_sve_svbic_s64_m(__VA_ARGS__)
+#define svbic_s16_m(...) __builtin_sve_svbic_s16_m(__VA_ARGS__)
+#define svbic_u8_x(...) __builtin_sve_svbic_u8_x(__VA_ARGS__)
+#define svbic_u32_x(...) __builtin_sve_svbic_u32_x(__VA_ARGS__)
+#define svbic_u64_x(...) __builtin_sve_svbic_u64_x(__VA_ARGS__)
+#define svbic_u16_x(...) __builtin_sve_svbic_u16_x(__VA_ARGS__)
+#define svbic_s8_x(...) __builtin_sve_svbic_s8_x(__VA_ARGS__)
+#define svbic_s32_x(...) __builtin_sve_svbic_s32_x(__VA_ARGS__)
+#define svbic_s64_x(...) __builtin_sve_svbic_s64_x(__VA_ARGS__)
+#define svbic_s16_x(...) __builtin_sve_svbic_s16_x(__VA_ARGS__)
+#define svbic_u8_z(...) __builtin_sve_svbic_u8_z(__VA_ARGS__)
+#define svbic_u32_z(...) __builtin_sve_svbic_u32_z(__VA_ARGS__)
+#define svbic_u64_z(...) __builtin_sve_svbic_u64_z(__VA_ARGS__)
+#define svbic_u16_z(...) __builtin_sve_svbic_u16_z(__VA_ARGS__)
+#define svbic_s8_z(...) __builtin_sve_svbic_s8_z(__VA_ARGS__)
+#define svbic_s32_z(...) __builtin_sve_svbic_s32_z(__VA_ARGS__)
+#define svbic_s64_z(...) __builtin_sve_svbic_s64_z(__VA_ARGS__)
+#define svbic_s16_z(...) __builtin_sve_svbic_s16_z(__VA_ARGS__)
+#define svbrka_b_m(...) __builtin_sve_svbrka_b_m(__VA_ARGS__)
+#define svbrka_b_z(...) __builtin_sve_svbrka_b_z(__VA_ARGS__)
+#define svbrkb_b_m(...) __builtin_sve_svbrkb_b_m(__VA_ARGS__)
+#define svbrkb_b_z(...) __builtin_sve_svbrkb_b_z(__VA_ARGS__)
+#define svbrkn_b_z(...) __builtin_sve_svbrkn_b_z(__VA_ARGS__)
+#define svbrkpa_b_z(...) __builtin_sve_svbrkpa_b_z(__VA_ARGS__)
+#define svbrkpb_b_z(...) __builtin_sve_svbrkpb_b_z(__VA_ARGS__)
+#define svcadd_f64_m(...) __builtin_sve_svcadd_f64_m(__VA_ARGS__)
+#define svcadd_f32_m(...) __builtin_sve_svcadd_f32_m(__VA_ARGS__)
+#define svcadd_f16_m(...) __builtin_sve_svcadd_f16_m(__VA_ARGS__)
+#define svcadd_f64_x(...) __builtin_sve_svcadd_f64_x(__VA_ARGS__)
+#define svcadd_f32_x(...) __builtin_sve_svcadd_f32_x(__VA_ARGS__)
+#define svcadd_f16_x(...) __builtin_sve_svcadd_f16_x(__VA_ARGS__)
+#define svcadd_f64_z(...) __builtin_sve_svcadd_f64_z(__VA_ARGS__)
+#define svcadd_f32_z(...) __builtin_sve_svcadd_f32_z(__VA_ARGS__)
+#define svcadd_f16_z(...) __builtin_sve_svcadd_f16_z(__VA_ARGS__)
+#define svclasta_n_u8(...) __builtin_sve_svclasta_n_u8(__VA_ARGS__)
+#define svclasta_n_u32(...) __builtin_sve_svclasta_n_u32(__VA_ARGS__)
+#define svclasta_n_u64(...) __builtin_sve_svclasta_n_u64(__VA_ARGS__)
+#define svclasta_n_u16(...) __builtin_sve_svclasta_n_u16(__VA_ARGS__)
+#define svclasta_n_s8(...) __builtin_sve_svclasta_n_s8(__VA_ARGS__)
+#define svclasta_n_f64(...) __builtin_sve_svclasta_n_f64(__VA_ARGS__)
+#define svclasta_n_f32(...) __builtin_sve_svclasta_n_f32(__VA_ARGS__)
+#define svclasta_n_f16(...) __builtin_sve_svclasta_n_f16(__VA_ARGS__)
+#define svclasta_n_s32(...) __builtin_sve_svclasta_n_s32(__VA_ARGS__)
+#define svclasta_n_s64(...) __builtin_sve_svclasta_n_s64(__VA_ARGS__)
+#define svclasta_n_s16(...) __builtin_sve_svclasta_n_s16(__VA_ARGS__)
+#define svclasta_u8(...) __builtin_sve_svclasta_u8(__VA_ARGS__)
+#define svclasta_u32(...) __builtin_sve_svclasta_u32(__VA_ARGS__)
+#define svclasta_u64(...) __builtin_sve_svclasta_u64(__VA_ARGS__)
+#define svclasta_u16(...) __builtin_sve_svclasta_u16(__VA_ARGS__)
+#define svclasta_s8(...) __builtin_sve_svclasta_s8(__VA_ARGS__)
+#define svclasta_f64(...) __builtin_sve_svclasta_f64(__VA_ARGS__)
+#define svclasta_f32(...) __builtin_sve_svclasta_f32(__VA_ARGS__)
+#define svclasta_f16(...) __builtin_sve_svclasta_f16(__VA_ARGS__)
+#define svclasta_s32(...) __builtin_sve_svclasta_s32(__VA_ARGS__)
+#define svclasta_s64(...) __builtin_sve_svclasta_s64(__VA_ARGS__)
+#define svclasta_s16(...) __builtin_sve_svclasta_s16(__VA_ARGS__)
+#define svclastb_n_u8(...) __builtin_sve_svclastb_n_u8(__VA_ARGS__)
+#define svclastb_n_u32(...) __builtin_sve_svclastb_n_u32(__VA_ARGS__)
+#define svclastb_n_u64(...) __builtin_sve_svclastb_n_u64(__VA_ARGS__)
+#define svclastb_n_u16(...) __builtin_sve_svclastb_n_u16(__VA_ARGS__)
+#define svclastb_n_s8(...) __builtin_sve_svclastb_n_s8(__VA_ARGS__)
+#define svclastb_n_f64(...) __builtin_sve_svclastb_n_f64(__VA_ARGS__)
+#define svclastb_n_f32(...) __builtin_sve_svclastb_n_f32(__VA_ARGS__)
+#define svclastb_n_f16(...) __builtin_sve_svclastb_n_f16(__VA_ARGS__)
+#define svclastb_n_s32(...) __builtin_sve_svclastb_n_s32(__VA_ARGS__)
+#define svclastb_n_s64(...) __builtin_sve_svclastb_n_s64(__VA_ARGS__)
+#define svclastb_n_s16(...) __builtin_sve_svclastb_n_s16(__VA_ARGS__)
+#define svclastb_u8(...) __builtin_sve_svclastb_u8(__VA_ARGS__)
+#define svclastb_u32(...) __builtin_sve_svclastb_u32(__VA_ARGS__)
+#define svclastb_u64(...) __builtin_sve_svclastb_u64(__VA_ARGS__)
+#define svclastb_u16(...) __builtin_sve_svclastb_u16(__VA_ARGS__)
+#define svclastb_s8(...) __builtin_sve_svclastb_s8(__VA_ARGS__)
+#define svclastb_f64(...) __builtin_sve_svclastb_f64(__VA_ARGS__)
+#define svclastb_f32(...) __builtin_sve_svclastb_f32(__VA_ARGS__)
+#define svclastb_f16(...) __builtin_sve_svclastb_f16(__VA_ARGS__)
+#define svclastb_s32(...) __builtin_sve_svclastb_s32(__VA_ARGS__)
+#define svclastb_s64(...) __builtin_sve_svclastb_s64(__VA_ARGS__)
+#define svclastb_s16(...) __builtin_sve_svclastb_s16(__VA_ARGS__)
+#define svcls_s8_m(...) __builtin_sve_svcls_s8_m(__VA_ARGS__)
+#define svcls_s32_m(...) __builtin_sve_svcls_s32_m(__VA_ARGS__)
+#define svcls_s64_m(...) __builtin_sve_svcls_s64_m(__VA_ARGS__)
+#define svcls_s16_m(...) __builtin_sve_svcls_s16_m(__VA_ARGS__)
+#define svcls_s8_x(...) __builtin_sve_svcls_s8_x(__VA_ARGS__)
+#define svcls_s32_x(...) __builtin_sve_svcls_s32_x(__VA_ARGS__)
+#define svcls_s64_x(...) __builtin_sve_svcls_s64_x(__VA_ARGS__)
+#define svcls_s16_x(...) __builtin_sve_svcls_s16_x(__VA_ARGS__)
+#define svcls_s8_z(...) __builtin_sve_svcls_s8_z(__VA_ARGS__)
+#define svcls_s32_z(...) __builtin_sve_svcls_s32_z(__VA_ARGS__)
+#define svcls_s64_z(...) __builtin_sve_svcls_s64_z(__VA_ARGS__)
+#define svcls_s16_z(...) __builtin_sve_svcls_s16_z(__VA_ARGS__)
+#define svclz_u8_m(...) __builtin_sve_svclz_u8_m(__VA_ARGS__)
+#define svclz_u32_m(...) __builtin_sve_svclz_u32_m(__VA_ARGS__)
+#define svclz_u64_m(...) __builtin_sve_svclz_u64_m(__VA_ARGS__)
+#define svclz_u16_m(...) __builtin_sve_svclz_u16_m(__VA_ARGS__)
+#define svclz_s8_m(...) __builtin_sve_svclz_s8_m(__VA_ARGS__)
+#define svclz_s32_m(...) __builtin_sve_svclz_s32_m(__VA_ARGS__)
+#define svclz_s64_m(...) __builtin_sve_svclz_s64_m(__VA_ARGS__)
+#define svclz_s16_m(...) __builtin_sve_svclz_s16_m(__VA_ARGS__)
+#define svclz_u8_x(...) __builtin_sve_svclz_u8_x(__VA_ARGS__)
+#define svclz_u32_x(...) __builtin_sve_svclz_u32_x(__VA_ARGS__)
+#define svclz_u64_x(...) __builtin_sve_svclz_u64_x(__VA_ARGS__)
+#define svclz_u16_x(...) __builtin_sve_svclz_u16_x(__VA_ARGS__)
+#define svclz_s8_x(...) __builtin_sve_svclz_s8_x(__VA_ARGS__)
+#define svclz_s32_x(...) __builtin_sve_svclz_s32_x(__VA_ARGS__)
+#define svclz_s64_x(...) __builtin_sve_svclz_s64_x(__VA_ARGS__)
+#define svclz_s16_x(...) __builtin_sve_svclz_s16_x(__VA_ARGS__)
+#define svclz_u8_z(...) __builtin_sve_svclz_u8_z(__VA_ARGS__)
+#define svclz_u32_z(...) __builtin_sve_svclz_u32_z(__VA_ARGS__)
+#define svclz_u64_z(...) __builtin_sve_svclz_u64_z(__VA_ARGS__)
+#define svclz_u16_z(...) __builtin_sve_svclz_u16_z(__VA_ARGS__)
+#define svclz_s8_z(...) __builtin_sve_svclz_s8_z(__VA_ARGS__)
+#define svclz_s32_z(...) __builtin_sve_svclz_s32_z(__VA_ARGS__)
+#define svclz_s64_z(...) __builtin_sve_svclz_s64_z(__VA_ARGS__)
+#define svclz_s16_z(...) __builtin_sve_svclz_s16_z(__VA_ARGS__)
+#define svcmla_f64_m(...) __builtin_sve_svcmla_f64_m(__VA_ARGS__)
+#define svcmla_f32_m(...) __builtin_sve_svcmla_f32_m(__VA_ARGS__)
+#define svcmla_f16_m(...) __builtin_sve_svcmla_f16_m(__VA_ARGS__)
+#define svcmla_f64_x(...) __builtin_sve_svcmla_f64_x(__VA_ARGS__)
+#define svcmla_f32_x(...) __builtin_sve_svcmla_f32_x(__VA_ARGS__)
+#define svcmla_f16_x(...) __builtin_sve_svcmla_f16_x(__VA_ARGS__)
+#define svcmla_f64_z(...) __builtin_sve_svcmla_f64_z(__VA_ARGS__)
+#define svcmla_f32_z(...) __builtin_sve_svcmla_f32_z(__VA_ARGS__)
+#define svcmla_f16_z(...) __builtin_sve_svcmla_f16_z(__VA_ARGS__)
+#define svcmla_lane_f32(...) __builtin_sve_svcmla_lane_f32(__VA_ARGS__)
+#define svcmla_lane_f16(...) __builtin_sve_svcmla_lane_f16(__VA_ARGS__)
+#define svcmpeq_n_f64(...) __builtin_sve_svcmpeq_n_f64(__VA_ARGS__)
+#define svcmpeq_n_f32(...) __builtin_sve_svcmpeq_n_f32(__VA_ARGS__)
+#define svcmpeq_n_f16(...) __builtin_sve_svcmpeq_n_f16(__VA_ARGS__)
+#define svcmpeq_n_u8(...) __builtin_sve_svcmpeq_n_u8(__VA_ARGS__)
+#define svcmpeq_n_u32(...) __builtin_sve_svcmpeq_n_u32(__VA_ARGS__)
+#define svcmpeq_n_u64(...) __builtin_sve_svcmpeq_n_u64(__VA_ARGS__)
+#define svcmpeq_n_u16(...) __builtin_sve_svcmpeq_n_u16(__VA_ARGS__)
+#define svcmpeq_n_s8(...) __builtin_sve_svcmpeq_n_s8(__VA_ARGS__)
+#define svcmpeq_n_s32(...) __builtin_sve_svcmpeq_n_s32(__VA_ARGS__)
+#define svcmpeq_n_s64(...) __builtin_sve_svcmpeq_n_s64(__VA_ARGS__)
+#define svcmpeq_n_s16(...) __builtin_sve_svcmpeq_n_s16(__VA_ARGS__)
+#define svcmpeq_u8(...) __builtin_sve_svcmpeq_u8(__VA_ARGS__)
+#define svcmpeq_u32(...) __builtin_sve_svcmpeq_u32(__VA_ARGS__)
+#define svcmpeq_u64(...) __builtin_sve_svcmpeq_u64(__VA_ARGS__)
+#define svcmpeq_u16(...) __builtin_sve_svcmpeq_u16(__VA_ARGS__)
+#define svcmpeq_s8(...) __builtin_sve_svcmpeq_s8(__VA_ARGS__)
+#define svcmpeq_s32(...) __builtin_sve_svcmpeq_s32(__VA_ARGS__)
+#define svcmpeq_s64(...) __builtin_sve_svcmpeq_s64(__VA_ARGS__)
+#define svcmpeq_s16(...) __builtin_sve_svcmpeq_s16(__VA_ARGS__)
+#define svcmpeq_f64(...) __builtin_sve_svcmpeq_f64(__VA_ARGS__)
+#define svcmpeq_f32(...) __builtin_sve_svcmpeq_f32(__VA_ARGS__)
+#define svcmpeq_f16(...) __builtin_sve_svcmpeq_f16(__VA_ARGS__)
+#define svcmpeq_wide_n_s8(...) __builtin_sve_svcmpeq_wide_n_s8(__VA_ARGS__)
+#define svcmpeq_wide_n_s32(...) __builtin_sve_svcmpeq_wide_n_s32(__VA_ARGS__)
+#define svcmpeq_wide_n_s16(...) __builtin_sve_svcmpeq_wide_n_s16(__VA_ARGS__)
+#define svcmpeq_wide_s8(...) __builtin_sve_svcmpeq_wide_s8(__VA_ARGS__)
+#define svcmpeq_wide_s32(...) __builtin_sve_svcmpeq_wide_s32(__VA_ARGS__)
+#define svcmpeq_wide_s16(...) __builtin_sve_svcmpeq_wide_s16(__VA_ARGS__)
+#define svcmpge_n_f64(...) __builtin_sve_svcmpge_n_f64(__VA_ARGS__)
+#define svcmpge_n_f32(...) __builtin_sve_svcmpge_n_f32(__VA_ARGS__)
+#define svcmpge_n_f16(...) __builtin_sve_svcmpge_n_f16(__VA_ARGS__)
+#define svcmpge_n_s8(...) __builtin_sve_svcmpge_n_s8(__VA_ARGS__)
+#define svcmpge_n_s32(...) __builtin_sve_svcmpge_n_s32(__VA_ARGS__)
+#define svcmpge_n_s64(...) __builtin_sve_svcmpge_n_s64(__VA_ARGS__)
+#define svcmpge_n_s16(...) __builtin_sve_svcmpge_n_s16(__VA_ARGS__)
+#define svcmpge_n_u8(...) __builtin_sve_svcmpge_n_u8(__VA_ARGS__)
+#define svcmpge_n_u32(...) __builtin_sve_svcmpge_n_u32(__VA_ARGS__)
+#define svcmpge_n_u64(...) __builtin_sve_svcmpge_n_u64(__VA_ARGS__)
+#define svcmpge_n_u16(...) __builtin_sve_svcmpge_n_u16(__VA_ARGS__)
+#define svcmpge_s8(...) __builtin_sve_svcmpge_s8(__VA_ARGS__)
+#define svcmpge_s32(...) __builtin_sve_svcmpge_s32(__VA_ARGS__)
+#define svcmpge_s64(...) __builtin_sve_svcmpge_s64(__VA_ARGS__)
+#define svcmpge_s16(...) __builtin_sve_svcmpge_s16(__VA_ARGS__)
+#define svcmpge_f64(...) __builtin_sve_svcmpge_f64(__VA_ARGS__)
+#define svcmpge_f32(...) __builtin_sve_svcmpge_f32(__VA_ARGS__)
+#define svcmpge_f16(...) __builtin_sve_svcmpge_f16(__VA_ARGS__)
+#define svcmpge_u8(...) __builtin_sve_svcmpge_u8(__VA_ARGS__)
+#define svcmpge_u32(...) __builtin_sve_svcmpge_u32(__VA_ARGS__)
+#define svcmpge_u64(...) __builtin_sve_svcmpge_u64(__VA_ARGS__)
+#define svcmpge_u16(...) __builtin_sve_svcmpge_u16(__VA_ARGS__)
+#define svcmpge_wide_n_s8(...) __builtin_sve_svcmpge_wide_n_s8(__VA_ARGS__)
+#define svcmpge_wide_n_s32(...) __builtin_sve_svcmpge_wide_n_s32(__VA_ARGS__)
+#define svcmpge_wide_n_s16(...) __builtin_sve_svcmpge_wide_n_s16(__VA_ARGS__)
+#define svcmpge_wide_n_u8(...) __builtin_sve_svcmpge_wide_n_u8(__VA_ARGS__)
+#define svcmpge_wide_n_u32(...) __builtin_sve_svcmpge_wide_n_u32(__VA_ARGS__)
+#define svcmpge_wide_n_u16(...) __builtin_sve_svcmpge_wide_n_u16(__VA_ARGS__)
+#define svcmpge_wide_s8(...) __builtin_sve_svcmpge_wide_s8(__VA_ARGS__)
+#define svcmpge_wide_s32(...) __builtin_sve_svcmpge_wide_s32(__VA_ARGS__)
+#define svcmpge_wide_s16(...) __builtin_sve_svcmpge_wide_s16(__VA_ARGS__)
+#define svcmpge_wide_u8(...) __builtin_sve_svcmpge_wide_u8(__VA_ARGS__)
+#define svcmpge_wide_u32(...) __builtin_sve_svcmpge_wide_u32(__VA_ARGS__)
+#define svcmpge_wide_u16(...) __builtin_sve_svcmpge_wide_u16(__VA_ARGS__)
+#define svcmpgt_n_f64(...) __builtin_sve_svcmpgt_n_f64(__VA_ARGS__)
+#define svcmpgt_n_f32(...) __builtin_sve_svcmpgt_n_f32(__VA_ARGS__)
+#define svcmpgt_n_f16(...) __builtin_sve_svcmpgt_n_f16(__VA_ARGS__)
+#define svcmpgt_n_s8(...) __builtin_sve_svcmpgt_n_s8(__VA_ARGS__)
+#define svcmpgt_n_s32(...) __builtin_sve_svcmpgt_n_s32(__VA_ARGS__)
+#define svcmpgt_n_s64(...) __builtin_sve_svcmpgt_n_s64(__VA_ARGS__)
+#define svcmpgt_n_s16(...) __builtin_sve_svcmpgt_n_s16(__VA_ARGS__)
+#define svcmpgt_n_u8(...) __builtin_sve_svcmpgt_n_u8(__VA_ARGS__)
+#define svcmpgt_n_u32(...) __builtin_sve_svcmpgt_n_u32(__VA_ARGS__)
+#define svcmpgt_n_u64(...) __builtin_sve_svcmpgt_n_u64(__VA_ARGS__)
+#define svcmpgt_n_u16(...) __builtin_sve_svcmpgt_n_u16(__VA_ARGS__)
+#define svcmpgt_s8(...) __builtin_sve_svcmpgt_s8(__VA_ARGS__)
+#define svcmpgt_s32(...) __builtin_sve_svcmpgt_s32(__VA_ARGS__)
+#define svcmpgt_s64(...) __builtin_sve_svcmpgt_s64(__VA_ARGS__)
+#define svcmpgt_s16(...) __builtin_sve_svcmpgt_s16(__VA_ARGS__)
+#define svcmpgt_f64(...) __builtin_sve_svcmpgt_f64(__VA_ARGS__)
+#define svcmpgt_f32(...) __builtin_sve_svcmpgt_f32(__VA_ARGS__)
+#define svcmpgt_f16(...) __builtin_sve_svcmpgt_f16(__VA_ARGS__)
+#define svcmpgt_u8(...) __builtin_sve_svcmpgt_u8(__VA_ARGS__)
+#define svcmpgt_u32(...) __builtin_sve_svcmpgt_u32(__VA_ARGS__)
+#define svcmpgt_u64(...) __builtin_sve_svcmpgt_u64(__VA_ARGS__)
+#define svcmpgt_u16(...) __builtin_sve_svcmpgt_u16(__VA_ARGS__)
+#define svcmpgt_wide_n_s8(...) __builtin_sve_svcmpgt_wide_n_s8(__VA_ARGS__)
+#define svcmpgt_wide_n_s32(...) __builtin_sve_svcmpgt_wide_n_s32(__VA_ARGS__)
+#define svcmpgt_wide_n_s16(...) __builtin_sve_svcmpgt_wide_n_s16(__VA_ARGS__)
+#define svcmpgt_wide_n_u8(...) __builtin_sve_svcmpgt_wide_n_u8(__VA_ARGS__)
+#define svcmpgt_wide_n_u32(...) __builtin_sve_svcmpgt_wide_n_u32(__VA_ARGS__)
+#define svcmpgt_wide_n_u16(...) __builtin_sve_svcmpgt_wide_n_u16(__VA_ARGS__)
+#define svcmpgt_wide_s8(...) __builtin_sve_svcmpgt_wide_s8(__VA_ARGS__)
+#define svcmpgt_wide_s32(...) __builtin_sve_svcmpgt_wide_s32(__VA_ARGS__)
+#define svcmpgt_wide_s16(...) __builtin_sve_svcmpgt_wide_s16(__VA_ARGS__)
+#define svcmpgt_wide_u8(...) __builtin_sve_svcmpgt_wide_u8(__VA_ARGS__)
+#define svcmpgt_wide_u32(...) __builtin_sve_svcmpgt_wide_u32(__VA_ARGS__)
+#define svcmpgt_wide_u16(...) __builtin_sve_svcmpgt_wide_u16(__VA_ARGS__)
+#define svcmple_n_f64(...) __builtin_sve_svcmple_n_f64(__VA_ARGS__)
+#define svcmple_n_f32(...) __builtin_sve_svcmple_n_f32(__VA_ARGS__)
+#define svcmple_n_f16(...) __builtin_sve_svcmple_n_f16(__VA_ARGS__)
+#define svcmple_n_s8(...) __builtin_sve_svcmple_n_s8(__VA_ARGS__)
+#define svcmple_n_s32(...) __builtin_sve_svcmple_n_s32(__VA_ARGS__)
+#define svcmple_n_s64(...) __builtin_sve_svcmple_n_s64(__VA_ARGS__)
+#define svcmple_n_s16(...) __builtin_sve_svcmple_n_s16(__VA_ARGS__)
+#define svcmple_n_u8(...) __builtin_sve_svcmple_n_u8(__VA_ARGS__)
+#define svcmple_n_u32(...) __builtin_sve_svcmple_n_u32(__VA_ARGS__)
+#define svcmple_n_u64(...) __builtin_sve_svcmple_n_u64(__VA_ARGS__)
+#define svcmple_n_u16(...) __builtin_sve_svcmple_n_u16(__VA_ARGS__)
+#define svcmple_s8(...) __builtin_sve_svcmple_s8(__VA_ARGS__)
+#define svcmple_s32(...) __builtin_sve_svcmple_s32(__VA_ARGS__)
+#define svcmple_s64(...) __builtin_sve_svcmple_s64(__VA_ARGS__)
+#define svcmple_s16(...) __builtin_sve_svcmple_s16(__VA_ARGS__)
+#define svcmple_f64(...) __builtin_sve_svcmple_f64(__VA_ARGS__)
+#define svcmple_f32(...) __builtin_sve_svcmple_f32(__VA_ARGS__)
+#define svcmple_f16(...) __builtin_sve_svcmple_f16(__VA_ARGS__)
+#define svcmple_u8(...) __builtin_sve_svcmple_u8(__VA_ARGS__)
+#define svcmple_u32(...) __builtin_sve_svcmple_u32(__VA_ARGS__)
+#define svcmple_u64(...) __builtin_sve_svcmple_u64(__VA_ARGS__)
+#define svcmple_u16(...) __builtin_sve_svcmple_u16(__VA_ARGS__)
+#define svcmple_wide_n_s8(...) __builtin_sve_svcmple_wide_n_s8(__VA_ARGS__)
+#define svcmple_wide_n_s32(...) __builtin_sve_svcmple_wide_n_s32(__VA_ARGS__)
+#define svcmple_wide_n_s16(...) __builtin_sve_svcmple_wide_n_s16(__VA_ARGS__)
+#define svcmple_wide_n_u8(...) __builtin_sve_svcmple_wide_n_u8(__VA_ARGS__)
+#define svcmple_wide_n_u32(...) __builtin_sve_svcmple_wide_n_u32(__VA_ARGS__)
+#define svcmple_wide_n_u16(...) __builtin_sve_svcmple_wide_n_u16(__VA_ARGS__)
+#define svcmple_wide_s8(...) __builtin_sve_svcmple_wide_s8(__VA_ARGS__)
+#define svcmple_wide_s32(...) __builtin_sve_svcmple_wide_s32(__VA_ARGS__)
+#define svcmple_wide_s16(...) __builtin_sve_svcmple_wide_s16(__VA_ARGS__)
+#define svcmple_wide_u8(...) __builtin_sve_svcmple_wide_u8(__VA_ARGS__)
+#define svcmple_wide_u32(...) __builtin_sve_svcmple_wide_u32(__VA_ARGS__)
+#define svcmple_wide_u16(...) __builtin_sve_svcmple_wide_u16(__VA_ARGS__)
+#define svcmplt_n_u8(...) __builtin_sve_svcmplt_n_u8(__VA_ARGS__)
+#define svcmplt_n_u32(...) __builtin_sve_svcmplt_n_u32(__VA_ARGS__)
+#define svcmplt_n_u64(...) __builtin_sve_svcmplt_n_u64(__VA_ARGS__)
+#define svcmplt_n_u16(...) __builtin_sve_svcmplt_n_u16(__VA_ARGS__)
+#define svcmplt_n_f64(...) __builtin_sve_svcmplt_n_f64(__VA_ARGS__)
+#define svcmplt_n_f32(...) __builtin_sve_svcmplt_n_f32(__VA_ARGS__)
+#define svcmplt_n_f16(...) __builtin_sve_svcmplt_n_f16(__VA_ARGS__)
+#define svcmplt_n_s8(...) __builtin_sve_svcmplt_n_s8(__VA_ARGS__)
+#define svcmplt_n_s32(...) __builtin_sve_svcmplt_n_s32(__VA_ARGS__)
+#define svcmplt_n_s64(...) __builtin_sve_svcmplt_n_s64(__VA_ARGS__)
+#define svcmplt_n_s16(...) __builtin_sve_svcmplt_n_s16(__VA_ARGS__)
+#define svcmplt_u8(...) __builtin_sve_svcmplt_u8(__VA_ARGS__)
+#define svcmplt_u32(...) __builtin_sve_svcmplt_u32(__VA_ARGS__)
+#define svcmplt_u64(...) __builtin_sve_svcmplt_u64(__VA_ARGS__)
+#define svcmplt_u16(...) __builtin_sve_svcmplt_u16(__VA_ARGS__)
+#define svcmplt_s8(...) __builtin_sve_svcmplt_s8(__VA_ARGS__)
+#define svcmplt_s32(...) __builtin_sve_svcmplt_s32(__VA_ARGS__)
+#define svcmplt_s64(...) __builtin_sve_svcmplt_s64(__VA_ARGS__)
+#define svcmplt_s16(...) __builtin_sve_svcmplt_s16(__VA_ARGS__)
+#define svcmplt_f64(...) __builtin_sve_svcmplt_f64(__VA_ARGS__)
+#define svcmplt_f32(...) __builtin_sve_svcmplt_f32(__VA_ARGS__)
+#define svcmplt_f16(...) __builtin_sve_svcmplt_f16(__VA_ARGS__)
+#define svcmplt_wide_n_u8(...) __builtin_sve_svcmplt_wide_n_u8(__VA_ARGS__)
+#define svcmplt_wide_n_u32(...) __builtin_sve_svcmplt_wide_n_u32(__VA_ARGS__)
+#define svcmplt_wide_n_u16(...) __builtin_sve_svcmplt_wide_n_u16(__VA_ARGS__)
+#define svcmplt_wide_n_s8(...) __builtin_sve_svcmplt_wide_n_s8(__VA_ARGS__)
+#define svcmplt_wide_n_s32(...) __builtin_sve_svcmplt_wide_n_s32(__VA_ARGS__)
+#define svcmplt_wide_n_s16(...) __builtin_sve_svcmplt_wide_n_s16(__VA_ARGS__)
+#define svcmplt_wide_u8(...) __builtin_sve_svcmplt_wide_u8(__VA_ARGS__)
+#define svcmplt_wide_u32(...) __builtin_sve_svcmplt_wide_u32(__VA_ARGS__)
+#define svcmplt_wide_u16(...) __builtin_sve_svcmplt_wide_u16(__VA_ARGS__)
+#define svcmplt_wide_s8(...) __builtin_sve_svcmplt_wide_s8(__VA_ARGS__)
+#define svcmplt_wide_s32(...) __builtin_sve_svcmplt_wide_s32(__VA_ARGS__)
+#define svcmplt_wide_s16(...) __builtin_sve_svcmplt_wide_s16(__VA_ARGS__)
+#define svcmpne_n_f64(...) __builtin_sve_svcmpne_n_f64(__VA_ARGS__)
+#define svcmpne_n_f32(...) __builtin_sve_svcmpne_n_f32(__VA_ARGS__)
+#define svcmpne_n_f16(...) __builtin_sve_svcmpne_n_f16(__VA_ARGS__)
+#define svcmpne_n_u8(...) __builtin_sve_svcmpne_n_u8(__VA_ARGS__)
+#define svcmpne_n_u32(...) __builtin_sve_svcmpne_n_u32(__VA_ARGS__)
+#define svcmpne_n_u64(...) __builtin_sve_svcmpne_n_u64(__VA_ARGS__)
+#define svcmpne_n_u16(...) __builtin_sve_svcmpne_n_u16(__VA_ARGS__)
+#define svcmpne_n_s8(...) __builtin_sve_svcmpne_n_s8(__VA_ARGS__)
+#define svcmpne_n_s32(...) __builtin_sve_svcmpne_n_s32(__VA_ARGS__)
+#define svcmpne_n_s64(...) __builtin_sve_svcmpne_n_s64(__VA_ARGS__)
+#define svcmpne_n_s16(...) __builtin_sve_svcmpne_n_s16(__VA_ARGS__)
+#define svcmpne_u8(...) __builtin_sve_svcmpne_u8(__VA_ARGS__)
+#define svcmpne_u32(...) __builtin_sve_svcmpne_u32(__VA_ARGS__)
+#define svcmpne_u64(...) __builtin_sve_svcmpne_u64(__VA_ARGS__)
+#define svcmpne_u16(...) __builtin_sve_svcmpne_u16(__VA_ARGS__)
+#define svcmpne_s8(...) __builtin_sve_svcmpne_s8(__VA_ARGS__)
+#define svcmpne_s32(...) __builtin_sve_svcmpne_s32(__VA_ARGS__)
+#define svcmpne_s64(...) __builtin_sve_svcmpne_s64(__VA_ARGS__)
+#define svcmpne_s16(...) __builtin_sve_svcmpne_s16(__VA_ARGS__)
+#define svcmpne_f64(...) __builtin_sve_svcmpne_f64(__VA_ARGS__)
+#define svcmpne_f32(...) __builtin_sve_svcmpne_f32(__VA_ARGS__)
+#define svcmpne_f16(...) __builtin_sve_svcmpne_f16(__VA_ARGS__)
+#define svcmpne_wide_n_s8(...) __builtin_sve_svcmpne_wide_n_s8(__VA_ARGS__)
+#define svcmpne_wide_n_s32(...) __builtin_sve_svcmpne_wide_n_s32(__VA_ARGS__)
+#define svcmpne_wide_n_s16(...) __builtin_sve_svcmpne_wide_n_s16(__VA_ARGS__)
+#define svcmpne_wide_s8(...) __builtin_sve_svcmpne_wide_s8(__VA_ARGS__)
+#define svcmpne_wide_s32(...) __builtin_sve_svcmpne_wide_s32(__VA_ARGS__)
+#define svcmpne_wide_s16(...) __builtin_sve_svcmpne_wide_s16(__VA_ARGS__)
+#define svcmpuo_n_f64(...) __builtin_sve_svcmpuo_n_f64(__VA_ARGS__)
+#define svcmpuo_n_f32(...) __builtin_sve_svcmpuo_n_f32(__VA_ARGS__)
+#define svcmpuo_n_f16(...) __builtin_sve_svcmpuo_n_f16(__VA_ARGS__)
+#define svcmpuo_f64(...) __builtin_sve_svcmpuo_f64(__VA_ARGS__)
+#define svcmpuo_f32(...) __builtin_sve_svcmpuo_f32(__VA_ARGS__)
+#define svcmpuo_f16(...) __builtin_sve_svcmpuo_f16(__VA_ARGS__)
+#define svcnot_u8_m(...) __builtin_sve_svcnot_u8_m(__VA_ARGS__)
+#define svcnot_u32_m(...) __builtin_sve_svcnot_u32_m(__VA_ARGS__)
+#define svcnot_u64_m(...) __builtin_sve_svcnot_u64_m(__VA_ARGS__)
+#define svcnot_u16_m(...) __builtin_sve_svcnot_u16_m(__VA_ARGS__)
+#define svcnot_s8_m(...) __builtin_sve_svcnot_s8_m(__VA_ARGS__)
+#define svcnot_s32_m(...) __builtin_sve_svcnot_s32_m(__VA_ARGS__)
+#define svcnot_s64_m(...) __builtin_sve_svcnot_s64_m(__VA_ARGS__)
+#define svcnot_s16_m(...) __builtin_sve_svcnot_s16_m(__VA_ARGS__)
+#define svcnot_u8_x(...) __builtin_sve_svcnot_u8_x(__VA_ARGS__)
+#define svcnot_u32_x(...) __builtin_sve_svcnot_u32_x(__VA_ARGS__)
+#define svcnot_u64_x(...) __builtin_sve_svcnot_u64_x(__VA_ARGS__)
+#define svcnot_u16_x(...) __builtin_sve_svcnot_u16_x(__VA_ARGS__)
+#define svcnot_s8_x(...) __builtin_sve_svcnot_s8_x(__VA_ARGS__)
+#define svcnot_s32_x(...) __builtin_sve_svcnot_s32_x(__VA_ARGS__)
+#define svcnot_s64_x(...) __builtin_sve_svcnot_s64_x(__VA_ARGS__)
+#define svcnot_s16_x(...) __builtin_sve_svcnot_s16_x(__VA_ARGS__)
+#define svcnot_u8_z(...) __builtin_sve_svcnot_u8_z(__VA_ARGS__)
+#define svcnot_u32_z(...) __builtin_sve_svcnot_u32_z(__VA_ARGS__)
+#define svcnot_u64_z(...) __builtin_sve_svcnot_u64_z(__VA_ARGS__)
+#define svcnot_u16_z(...) __builtin_sve_svcnot_u16_z(__VA_ARGS__)
+#define svcnot_s8_z(...) __builtin_sve_svcnot_s8_z(__VA_ARGS__)
+#define svcnot_s32_z(...) __builtin_sve_svcnot_s32_z(__VA_ARGS__)
+#define svcnot_s64_z(...) __builtin_sve_svcnot_s64_z(__VA_ARGS__)
+#define svcnot_s16_z(...) __builtin_sve_svcnot_s16_z(__VA_ARGS__)
+#define svcnt_u8_m(...) __builtin_sve_svcnt_u8_m(__VA_ARGS__)
+#define svcnt_u32_m(...) __builtin_sve_svcnt_u32_m(__VA_ARGS__)
+#define svcnt_u64_m(...) __builtin_sve_svcnt_u64_m(__VA_ARGS__)
+#define svcnt_u16_m(...) __builtin_sve_svcnt_u16_m(__VA_ARGS__)
+#define svcnt_s8_m(...) __builtin_sve_svcnt_s8_m(__VA_ARGS__)
+#define svcnt_f64_m(...) __builtin_sve_svcnt_f64_m(__VA_ARGS__)
+#define svcnt_f32_m(...) __builtin_sve_svcnt_f32_m(__VA_ARGS__)
+#define svcnt_f16_m(...) __builtin_sve_svcnt_f16_m(__VA_ARGS__)
+#define svcnt_s32_m(...) __builtin_sve_svcnt_s32_m(__VA_ARGS__)
+#define svcnt_s64_m(...) __builtin_sve_svcnt_s64_m(__VA_ARGS__)
+#define svcnt_s16_m(...) __builtin_sve_svcnt_s16_m(__VA_ARGS__)
+#define svcnt_u8_x(...) __builtin_sve_svcnt_u8_x(__VA_ARGS__)
+#define svcnt_u32_x(...) __builtin_sve_svcnt_u32_x(__VA_ARGS__)
+#define svcnt_u64_x(...) __builtin_sve_svcnt_u64_x(__VA_ARGS__)
+#define svcnt_u16_x(...) __builtin_sve_svcnt_u16_x(__VA_ARGS__)
+#define svcnt_s8_x(...) __builtin_sve_svcnt_s8_x(__VA_ARGS__)
+#define svcnt_f64_x(...) __builtin_sve_svcnt_f64_x(__VA_ARGS__)
+#define svcnt_f32_x(...) __builtin_sve_svcnt_f32_x(__VA_ARGS__)
+#define svcnt_f16_x(...) __builtin_sve_svcnt_f16_x(__VA_ARGS__)
+#define svcnt_s32_x(...) __builtin_sve_svcnt_s32_x(__VA_ARGS__)
+#define svcnt_s64_x(...) __builtin_sve_svcnt_s64_x(__VA_ARGS__)
+#define svcnt_s16_x(...) __builtin_sve_svcnt_s16_x(__VA_ARGS__)
+#define svcnt_u8_z(...) __builtin_sve_svcnt_u8_z(__VA_ARGS__)
+#define svcnt_u32_z(...) __builtin_sve_svcnt_u32_z(__VA_ARGS__)
+#define svcnt_u64_z(...) __builtin_sve_svcnt_u64_z(__VA_ARGS__)
+#define svcnt_u16_z(...) __builtin_sve_svcnt_u16_z(__VA_ARGS__)
+#define svcnt_s8_z(...) __builtin_sve_svcnt_s8_z(__VA_ARGS__)
+#define svcnt_f64_z(...) __builtin_sve_svcnt_f64_z(__VA_ARGS__)
+#define svcnt_f32_z(...) __builtin_sve_svcnt_f32_z(__VA_ARGS__)
+#define svcnt_f16_z(...) __builtin_sve_svcnt_f16_z(__VA_ARGS__)
+#define svcnt_s32_z(...) __builtin_sve_svcnt_s32_z(__VA_ARGS__)
+#define svcnt_s64_z(...) __builtin_sve_svcnt_s64_z(__VA_ARGS__)
+#define svcnt_s16_z(...) __builtin_sve_svcnt_s16_z(__VA_ARGS__)
+#define svcntb(...) __builtin_sve_svcntb(__VA_ARGS__)
+#define svcntb_pat(...) __builtin_sve_svcntb_pat(__VA_ARGS__)
+#define svcntd(...) __builtin_sve_svcntd(__VA_ARGS__)
+#define svcntd_pat(...) __builtin_sve_svcntd_pat(__VA_ARGS__)
+#define svcnth(...) __builtin_sve_svcnth(__VA_ARGS__)
+#define svcnth_pat(...) __builtin_sve_svcnth_pat(__VA_ARGS__)
+#define svcntp_b8(...) __builtin_sve_svcntp_b8(__VA_ARGS__)
+#define svcntp_b32(...) __builtin_sve_svcntp_b32(__VA_ARGS__)
+#define svcntp_b64(...) __builtin_sve_svcntp_b64(__VA_ARGS__)
+#define svcntp_b16(...) __builtin_sve_svcntp_b16(__VA_ARGS__)
+#define svcntw(...) __builtin_sve_svcntw(__VA_ARGS__)
+#define svcntw_pat(...) __builtin_sve_svcntw_pat(__VA_ARGS__)
+#define svcompact_u32(...) __builtin_sve_svcompact_u32(__VA_ARGS__)
+#define svcompact_u64(...) __builtin_sve_svcompact_u64(__VA_ARGS__)
+#define svcompact_f64(...) __builtin_sve_svcompact_f64(__VA_ARGS__)
+#define svcompact_f32(...) __builtin_sve_svcompact_f32(__VA_ARGS__)
+#define svcompact_s32(...) __builtin_sve_svcompact_s32(__VA_ARGS__)
+#define svcompact_s64(...) __builtin_sve_svcompact_s64(__VA_ARGS__)
+#define svcreate2_u8(...) __builtin_sve_svcreate2_u8(__VA_ARGS__)
+#define svcreate2_u32(...) __builtin_sve_svcreate2_u32(__VA_ARGS__)
+#define svcreate2_u64(...) __builtin_sve_svcreate2_u64(__VA_ARGS__)
+#define svcreate2_u16(...) __builtin_sve_svcreate2_u16(__VA_ARGS__)
+#define svcreate2_s8(...) __builtin_sve_svcreate2_s8(__VA_ARGS__)
+#define svcreate2_f64(...) __builtin_sve_svcreate2_f64(__VA_ARGS__)
+#define svcreate2_f32(...) __builtin_sve_svcreate2_f32(__VA_ARGS__)
+#define svcreate2_f16(...) __builtin_sve_svcreate2_f16(__VA_ARGS__)
+#define svcreate2_s32(...) __builtin_sve_svcreate2_s32(__VA_ARGS__)
+#define svcreate2_s64(...) __builtin_sve_svcreate2_s64(__VA_ARGS__)
+#define svcreate2_s16(...) __builtin_sve_svcreate2_s16(__VA_ARGS__)
+#define svcreate3_u8(...) __builtin_sve_svcreate3_u8(__VA_ARGS__)
+#define svcreate3_u32(...) __builtin_sve_svcreate3_u32(__VA_ARGS__)
+#define svcreate3_u64(...) __builtin_sve_svcreate3_u64(__VA_ARGS__)
+#define svcreate3_u16(...) __builtin_sve_svcreate3_u16(__VA_ARGS__)
+#define svcreate3_s8(...) __builtin_sve_svcreate3_s8(__VA_ARGS__)
+#define svcreate3_f64(...) __builtin_sve_svcreate3_f64(__VA_ARGS__)
+#define svcreate3_f32(...) __builtin_sve_svcreate3_f32(__VA_ARGS__)
+#define svcreate3_f16(...) __builtin_sve_svcreate3_f16(__VA_ARGS__)
+#define svcreate3_s32(...) __builtin_sve_svcreate3_s32(__VA_ARGS__)
+#define svcreate3_s64(...) __builtin_sve_svcreate3_s64(__VA_ARGS__)
+#define svcreate3_s16(...) __builtin_sve_svcreate3_s16(__VA_ARGS__)
+#define svcreate4_u8(...) __builtin_sve_svcreate4_u8(__VA_ARGS__)
+#define svcreate4_u32(...) __builtin_sve_svcreate4_u32(__VA_ARGS__)
+#define svcreate4_u64(...) __builtin_sve_svcreate4_u64(__VA_ARGS__)
+#define svcreate4_u16(...) __builtin_sve_svcreate4_u16(__VA_ARGS__)
+#define svcreate4_s8(...) __builtin_sve_svcreate4_s8(__VA_ARGS__)
+#define svcreate4_f64(...) __builtin_sve_svcreate4_f64(__VA_ARGS__)
+#define svcreate4_f32(...) __builtin_sve_svcreate4_f32(__VA_ARGS__)
+#define svcreate4_f16(...) __builtin_sve_svcreate4_f16(__VA_ARGS__)
+#define svcreate4_s32(...) __builtin_sve_svcreate4_s32(__VA_ARGS__)
+#define svcreate4_s64(...) __builtin_sve_svcreate4_s64(__VA_ARGS__)
+#define svcreate4_s16(...) __builtin_sve_svcreate4_s16(__VA_ARGS__)
+#define svcvt_f16_f32_m(...) __builtin_sve_svcvt_f16_f32_m(__VA_ARGS__)
+#define svcvt_f16_f32_x(...) __builtin_sve_svcvt_f16_f32_x(__VA_ARGS__)
+#define svcvt_f16_f32_z(...) __builtin_sve_svcvt_f16_f32_z(__VA_ARGS__)
+#define svcvt_f16_f64_m(...) __builtin_sve_svcvt_f16_f64_m(__VA_ARGS__)
+#define svcvt_f16_f64_x(...) __builtin_sve_svcvt_f16_f64_x(__VA_ARGS__)
+#define svcvt_f16_f64_z(...) __builtin_sve_svcvt_f16_f64_z(__VA_ARGS__)
+#define svcvt_f16_s16_m(...) __builtin_sve_svcvt_f16_s16_m(__VA_ARGS__)
+#define svcvt_f16_s16_x(...) __builtin_sve_svcvt_f16_s16_x(__VA_ARGS__)
+#define svcvt_f16_s16_z(...) __builtin_sve_svcvt_f16_s16_z(__VA_ARGS__)
+#define svcvt_f16_s32_m(...) __builtin_sve_svcvt_f16_s32_m(__VA_ARGS__)
+#define svcvt_f16_s32_x(...) __builtin_sve_svcvt_f16_s32_x(__VA_ARGS__)
+#define svcvt_f16_s32_z(...) __builtin_sve_svcvt_f16_s32_z(__VA_ARGS__)
+#define svcvt_f16_s64_m(...) __builtin_sve_svcvt_f16_s64_m(__VA_ARGS__)
+#define svcvt_f16_s64_x(...) __builtin_sve_svcvt_f16_s64_x(__VA_ARGS__)
+#define svcvt_f16_s64_z(...) __builtin_sve_svcvt_f16_s64_z(__VA_ARGS__)
+#define svcvt_f16_u16_m(...) __builtin_sve_svcvt_f16_u16_m(__VA_ARGS__)
+#define svcvt_f16_u16_x(...) __builtin_sve_svcvt_f16_u16_x(__VA_ARGS__)
+#define svcvt_f16_u16_z(...) __builtin_sve_svcvt_f16_u16_z(__VA_ARGS__)
+#define svcvt_f16_u32_m(...) __builtin_sve_svcvt_f16_u32_m(__VA_ARGS__)
+#define svcvt_f16_u32_x(...) __builtin_sve_svcvt_f16_u32_x(__VA_ARGS__)
+#define svcvt_f16_u32_z(...) __builtin_sve_svcvt_f16_u32_z(__VA_ARGS__)
+#define svcvt_f16_u64_m(...) __builtin_sve_svcvt_f16_u64_m(__VA_ARGS__)
+#define svcvt_f16_u64_x(...) __builtin_sve_svcvt_f16_u64_x(__VA_ARGS__)
+#define svcvt_f16_u64_z(...) __builtin_sve_svcvt_f16_u64_z(__VA_ARGS__)
+#define svcvt_f32_f16_m(...) __builtin_sve_svcvt_f32_f16_m(__VA_ARGS__)
+#define svcvt_f32_f16_x(...) __builtin_sve_svcvt_f32_f16_x(__VA_ARGS__)
+#define svcvt_f32_f16_z(...) __builtin_sve_svcvt_f32_f16_z(__VA_ARGS__)
+#define svcvt_f32_f64_m(...) __builtin_sve_svcvt_f32_f64_m(__VA_ARGS__)
+#define svcvt_f32_f64_x(...) __builtin_sve_svcvt_f32_f64_x(__VA_ARGS__)
+#define svcvt_f32_f64_z(...) __builtin_sve_svcvt_f32_f64_z(__VA_ARGS__)
+#define svcvt_f32_s32_m(...) __builtin_sve_svcvt_f32_s32_m(__VA_ARGS__)
+#define svcvt_f32_s32_x(...) __builtin_sve_svcvt_f32_s32_x(__VA_ARGS__)
+#define svcvt_f32_s32_z(...) __builtin_sve_svcvt_f32_s32_z(__VA_ARGS__)
+#define svcvt_f32_s64_m(...) __builtin_sve_svcvt_f32_s64_m(__VA_ARGS__)
+#define svcvt_f32_s64_x(...) __builtin_sve_svcvt_f32_s64_x(__VA_ARGS__)
+#define svcvt_f32_s64_z(...) __builtin_sve_svcvt_f32_s64_z(__VA_ARGS__)
+#define svcvt_f32_u32_m(...) __builtin_sve_svcvt_f32_u32_m(__VA_ARGS__)
+#define svcvt_f32_u32_x(...) __builtin_sve_svcvt_f32_u32_x(__VA_ARGS__)
+#define svcvt_f32_u32_z(...) __builtin_sve_svcvt_f32_u32_z(__VA_ARGS__)
+#define svcvt_f32_u64_m(...) __builtin_sve_svcvt_f32_u64_m(__VA_ARGS__)
+#define svcvt_f32_u64_x(...) __builtin_sve_svcvt_f32_u64_x(__VA_ARGS__)
+#define svcvt_f32_u64_z(...) __builtin_sve_svcvt_f32_u64_z(__VA_ARGS__)
+#define svcvt_f64_f16_m(...) __builtin_sve_svcvt_f64_f16_m(__VA_ARGS__)
+#define svcvt_f64_f16_x(...) __builtin_sve_svcvt_f64_f16_x(__VA_ARGS__)
+#define svcvt_f64_f16_z(...) __builtin_sve_svcvt_f64_f16_z(__VA_ARGS__)
+#define svcvt_f64_f32_m(...) __builtin_sve_svcvt_f64_f32_m(__VA_ARGS__)
+#define svcvt_f64_f32_x(...) __builtin_sve_svcvt_f64_f32_x(__VA_ARGS__)
+#define svcvt_f64_f32_z(...) __builtin_sve_svcvt_f64_f32_z(__VA_ARGS__)
+#define svcvt_f64_s32_m(...) __builtin_sve_svcvt_f64_s32_m(__VA_ARGS__)
+#define svcvt_f64_s32_x(...) __builtin_sve_svcvt_f64_s32_x(__VA_ARGS__)
+#define svcvt_f64_s32_z(...) __builtin_sve_svcvt_f64_s32_z(__VA_ARGS__)
+#define svcvt_f64_s64_m(...) __builtin_sve_svcvt_f64_s64_m(__VA_ARGS__)
+#define svcvt_f64_s64_x(...) __builtin_sve_svcvt_f64_s64_x(__VA_ARGS__)
+#define svcvt_f64_s64_z(...) __builtin_sve_svcvt_f64_s64_z(__VA_ARGS__)
+#define svcvt_f64_u32_m(...) __builtin_sve_svcvt_f64_u32_m(__VA_ARGS__)
+#define svcvt_f64_u32_x(...) __builtin_sve_svcvt_f64_u32_x(__VA_ARGS__)
+#define svcvt_f64_u32_z(...) __builtin_sve_svcvt_f64_u32_z(__VA_ARGS__)
+#define svcvt_f64_u64_m(...) __builtin_sve_svcvt_f64_u64_m(__VA_ARGS__)
+#define svcvt_f64_u64_x(...) __builtin_sve_svcvt_f64_u64_x(__VA_ARGS__)
+#define svcvt_f64_u64_z(...) __builtin_sve_svcvt_f64_u64_z(__VA_ARGS__)
+#define svcvt_s16_f16_m(...) __builtin_sve_svcvt_s16_f16_m(__VA_ARGS__)
+#define svcvt_s16_f16_x(...) __builtin_sve_svcvt_s16_f16_x(__VA_ARGS__)
+#define svcvt_s16_f16_z(...) __builtin_sve_svcvt_s16_f16_z(__VA_ARGS__)
+#define svcvt_s32_f16_m(...) __builtin_sve_svcvt_s32_f16_m(__VA_ARGS__)
+#define svcvt_s32_f16_x(...) __builtin_sve_svcvt_s32_f16_x(__VA_ARGS__)
+#define svcvt_s32_f16_z(...) __builtin_sve_svcvt_s32_f16_z(__VA_ARGS__)
+#define svcvt_s32_f32_m(...) __builtin_sve_svcvt_s32_f32_m(__VA_ARGS__)
+#define svcvt_s32_f32_x(...) __builtin_sve_svcvt_s32_f32_x(__VA_ARGS__)
+#define svcvt_s32_f32_z(...) __builtin_sve_svcvt_s32_f32_z(__VA_ARGS__)
+#define svcvt_s32_f64_m(...) __builtin_sve_svcvt_s32_f64_m(__VA_ARGS__)
+#define svcvt_s32_f64_x(...) __builtin_sve_svcvt_s32_f64_x(__VA_ARGS__)
+#define svcvt_s32_f64_z(...) __builtin_sve_svcvt_s32_f64_z(__VA_ARGS__)
+#define svcvt_s64_f16_m(...) __builtin_sve_svcvt_s64_f16_m(__VA_ARGS__)
+#define svcvt_s64_f16_x(...) __builtin_sve_svcvt_s64_f16_x(__VA_ARGS__)
+#define svcvt_s64_f16_z(...) __builtin_sve_svcvt_s64_f16_z(__VA_ARGS__)
+#define svcvt_s64_f32_m(...) __builtin_sve_svcvt_s64_f32_m(__VA_ARGS__)
+#define svcvt_s64_f32_x(...) __builtin_sve_svcvt_s64_f32_x(__VA_ARGS__)
+#define svcvt_s64_f32_z(...) __builtin_sve_svcvt_s64_f32_z(__VA_ARGS__)
+#define svcvt_s64_f64_m(...) __builtin_sve_svcvt_s64_f64_m(__VA_ARGS__)
+#define svcvt_s64_f64_x(...) __builtin_sve_svcvt_s64_f64_x(__VA_ARGS__)
+#define svcvt_s64_f64_z(...) __builtin_sve_svcvt_s64_f64_z(__VA_ARGS__)
+#define svcvt_u16_f16_m(...) __builtin_sve_svcvt_u16_f16_m(__VA_ARGS__)
+#define svcvt_u16_f16_x(...) __builtin_sve_svcvt_u16_f16_x(__VA_ARGS__)
+#define svcvt_u16_f16_z(...) __builtin_sve_svcvt_u16_f16_z(__VA_ARGS__)
+#define svcvt_u32_f16_m(...) __builtin_sve_svcvt_u32_f16_m(__VA_ARGS__)
+#define svcvt_u32_f16_x(...) __builtin_sve_svcvt_u32_f16_x(__VA_ARGS__)
+#define svcvt_u32_f16_z(...) __builtin_sve_svcvt_u32_f16_z(__VA_ARGS__)
+#define svcvt_u32_f32_m(...) __builtin_sve_svcvt_u32_f32_m(__VA_ARGS__)
+#define svcvt_u32_f32_x(...) __builtin_sve_svcvt_u32_f32_x(__VA_ARGS__)
+#define svcvt_u32_f32_z(...) __builtin_sve_svcvt_u32_f32_z(__VA_ARGS__)
+#define svcvt_u32_f64_m(...) __builtin_sve_svcvt_u32_f64_m(__VA_ARGS__)
+#define svcvt_u32_f64_x(...) __builtin_sve_svcvt_u32_f64_x(__VA_ARGS__)
+#define svcvt_u32_f64_z(...) __builtin_sve_svcvt_u32_f64_z(__VA_ARGS__)
+#define svcvt_u64_f16_m(...) __builtin_sve_svcvt_u64_f16_m(__VA_ARGS__)
+#define svcvt_u64_f16_x(...) __builtin_sve_svcvt_u64_f16_x(__VA_ARGS__)
+#define svcvt_u64_f16_z(...) __builtin_sve_svcvt_u64_f16_z(__VA_ARGS__)
+#define svcvt_u64_f32_m(...) __builtin_sve_svcvt_u64_f32_m(__VA_ARGS__)
+#define svcvt_u64_f32_x(...) __builtin_sve_svcvt_u64_f32_x(__VA_ARGS__)
+#define svcvt_u64_f32_z(...) __builtin_sve_svcvt_u64_f32_z(__VA_ARGS__)
+#define svcvt_u64_f64_m(...) __builtin_sve_svcvt_u64_f64_m(__VA_ARGS__)
+#define svcvt_u64_f64_x(...) __builtin_sve_svcvt_u64_f64_x(__VA_ARGS__)
+#define svcvt_u64_f64_z(...) __builtin_sve_svcvt_u64_f64_z(__VA_ARGS__)
+#define svdiv_n_f64_m(...) __builtin_sve_svdiv_n_f64_m(__VA_ARGS__)
+#define svdiv_n_f32_m(...) __builtin_sve_svdiv_n_f32_m(__VA_ARGS__)
+#define svdiv_n_f16_m(...) __builtin_sve_svdiv_n_f16_m(__VA_ARGS__)
+#define svdiv_n_f64_x(...) __builtin_sve_svdiv_n_f64_x(__VA_ARGS__)
+#define svdiv_n_f32_x(...) __builtin_sve_svdiv_n_f32_x(__VA_ARGS__)
+#define svdiv_n_f16_x(...) __builtin_sve_svdiv_n_f16_x(__VA_ARGS__)
+#define svdiv_n_f64_z(...) __builtin_sve_svdiv_n_f64_z(__VA_ARGS__)
+#define svdiv_n_f32_z(...) __builtin_sve_svdiv_n_f32_z(__VA_ARGS__)
+#define svdiv_n_f16_z(...) __builtin_sve_svdiv_n_f16_z(__VA_ARGS__)
+#define svdiv_n_s32_m(...) __builtin_sve_svdiv_n_s32_m(__VA_ARGS__)
+#define svdiv_n_s64_m(...) __builtin_sve_svdiv_n_s64_m(__VA_ARGS__)
+#define svdiv_n_s32_x(...) __builtin_sve_svdiv_n_s32_x(__VA_ARGS__)
+#define svdiv_n_s64_x(...) __builtin_sve_svdiv_n_s64_x(__VA_ARGS__)
+#define svdiv_n_s32_z(...) __builtin_sve_svdiv_n_s32_z(__VA_ARGS__)
+#define svdiv_n_s64_z(...) __builtin_sve_svdiv_n_s64_z(__VA_ARGS__)
+#define svdiv_n_u32_m(...) __builtin_sve_svdiv_n_u32_m(__VA_ARGS__)
+#define svdiv_n_u64_m(...) __builtin_sve_svdiv_n_u64_m(__VA_ARGS__)
+#define svdiv_n_u32_x(...) __builtin_sve_svdiv_n_u32_x(__VA_ARGS__)
+#define svdiv_n_u64_x(...) __builtin_sve_svdiv_n_u64_x(__VA_ARGS__)
+#define svdiv_n_u32_z(...) __builtin_sve_svdiv_n_u32_z(__VA_ARGS__)
+#define svdiv_n_u64_z(...) __builtin_sve_svdiv_n_u64_z(__VA_ARGS__)
+#define svdiv_f64_m(...) __builtin_sve_svdiv_f64_m(__VA_ARGS__)
+#define svdiv_f32_m(...) __builtin_sve_svdiv_f32_m(__VA_ARGS__)
+#define svdiv_f16_m(...) __builtin_sve_svdiv_f16_m(__VA_ARGS__)
+#define svdiv_f64_x(...) __builtin_sve_svdiv_f64_x(__VA_ARGS__)
+#define svdiv_f32_x(...) __builtin_sve_svdiv_f32_x(__VA_ARGS__)
+#define svdiv_f16_x(...) __builtin_sve_svdiv_f16_x(__VA_ARGS__)
+#define svdiv_f64_z(...) __builtin_sve_svdiv_f64_z(__VA_ARGS__)
+#define svdiv_f32_z(...) __builtin_sve_svdiv_f32_z(__VA_ARGS__)
+#define svdiv_f16_z(...) __builtin_sve_svdiv_f16_z(__VA_ARGS__)
+#define svdiv_s32_m(...) __builtin_sve_svdiv_s32_m(__VA_ARGS__)
+#define svdiv_s64_m(...) __builtin_sve_svdiv_s64_m(__VA_ARGS__)
+#define svdiv_s32_x(...) __builtin_sve_svdiv_s32_x(__VA_ARGS__)
+#define svdiv_s64_x(...) __builtin_sve_svdiv_s64_x(__VA_ARGS__)
+#define svdiv_s32_z(...) __builtin_sve_svdiv_s32_z(__VA_ARGS__)
+#define svdiv_s64_z(...) __builtin_sve_svdiv_s64_z(__VA_ARGS__)
+#define svdiv_u32_m(...) __builtin_sve_svdiv_u32_m(__VA_ARGS__)
+#define svdiv_u64_m(...) __builtin_sve_svdiv_u64_m(__VA_ARGS__)
+#define svdiv_u32_x(...) __builtin_sve_svdiv_u32_x(__VA_ARGS__)
+#define svdiv_u64_x(...) __builtin_sve_svdiv_u64_x(__VA_ARGS__)
+#define svdiv_u32_z(...) __builtin_sve_svdiv_u32_z(__VA_ARGS__)
+#define svdiv_u64_z(...) __builtin_sve_svdiv_u64_z(__VA_ARGS__)
+#define svdivr_n_f64_m(...) __builtin_sve_svdivr_n_f64_m(__VA_ARGS__)
+#define svdivr_n_f32_m(...) __builtin_sve_svdivr_n_f32_m(__VA_ARGS__)
+#define svdivr_n_f16_m(...) __builtin_sve_svdivr_n_f16_m(__VA_ARGS__)
+#define svdivr_n_f64_x(...) __builtin_sve_svdivr_n_f64_x(__VA_ARGS__)
+#define svdivr_n_f32_x(...) __builtin_sve_svdivr_n_f32_x(__VA_ARGS__)
+#define svdivr_n_f16_x(...) __builtin_sve_svdivr_n_f16_x(__VA_ARGS__)
+#define svdivr_n_f64_z(...) __builtin_sve_svdivr_n_f64_z(__VA_ARGS__)
+#define svdivr_n_f32_z(...) __builtin_sve_svdivr_n_f32_z(__VA_ARGS__)
+#define svdivr_n_f16_z(...) __builtin_sve_svdivr_n_f16_z(__VA_ARGS__)
+#define svdivr_n_s32_m(...) __builtin_sve_svdivr_n_s32_m(__VA_ARGS__)
+#define svdivr_n_s64_m(...) __builtin_sve_svdivr_n_s64_m(__VA_ARGS__)
+#define svdivr_n_s32_x(...) __builtin_sve_svdivr_n_s32_x(__VA_ARGS__)
+#define svdivr_n_s64_x(...) __builtin_sve_svdivr_n_s64_x(__VA_ARGS__)
+#define svdivr_n_s32_z(...) __builtin_sve_svdivr_n_s32_z(__VA_ARGS__)
+#define svdivr_n_s64_z(...) __builtin_sve_svdivr_n_s64_z(__VA_ARGS__)
+#define svdivr_n_u32_m(...) __builtin_sve_svdivr_n_u32_m(__VA_ARGS__)
+#define svdivr_n_u64_m(...) __builtin_sve_svdivr_n_u64_m(__VA_ARGS__)
+#define svdivr_n_u32_x(...) __builtin_sve_svdivr_n_u32_x(__VA_ARGS__)
+#define svdivr_n_u64_x(...) __builtin_sve_svdivr_n_u64_x(__VA_ARGS__)
+#define svdivr_n_u32_z(...) __builtin_sve_svdivr_n_u32_z(__VA_ARGS__)
+#define svdivr_n_u64_z(...) __builtin_sve_svdivr_n_u64_z(__VA_ARGS__)
+#define svdivr_f64_m(...) __builtin_sve_svdivr_f64_m(__VA_ARGS__)
+#define svdivr_f32_m(...) __builtin_sve_svdivr_f32_m(__VA_ARGS__)
+#define svdivr_f16_m(...) __builtin_sve_svdivr_f16_m(__VA_ARGS__)
+#define svdivr_f64_x(...) __builtin_sve_svdivr_f64_x(__VA_ARGS__)
+#define svdivr_f32_x(...) __builtin_sve_svdivr_f32_x(__VA_ARGS__)
+#define svdivr_f16_x(...) __builtin_sve_svdivr_f16_x(__VA_ARGS__)
+#define svdivr_f64_z(...) __builtin_sve_svdivr_f64_z(__VA_ARGS__)
+#define svdivr_f32_z(...) __builtin_sve_svdivr_f32_z(__VA_ARGS__)
+#define svdivr_f16_z(...) __builtin_sve_svdivr_f16_z(__VA_ARGS__)
+#define svdivr_s32_m(...) __builtin_sve_svdivr_s32_m(__VA_ARGS__)
+#define svdivr_s64_m(...) __builtin_sve_svdivr_s64_m(__VA_ARGS__)
+#define svdivr_s32_x(...) __builtin_sve_svdivr_s32_x(__VA_ARGS__)
+#define svdivr_s64_x(...) __builtin_sve_svdivr_s64_x(__VA_ARGS__)
+#define svdivr_s32_z(...) __builtin_sve_svdivr_s32_z(__VA_ARGS__)
+#define svdivr_s64_z(...) __builtin_sve_svdivr_s64_z(__VA_ARGS__)
+#define svdivr_u32_m(...) __builtin_sve_svdivr_u32_m(__VA_ARGS__)
+#define svdivr_u64_m(...) __builtin_sve_svdivr_u64_m(__VA_ARGS__)
+#define svdivr_u32_x(...) __builtin_sve_svdivr_u32_x(__VA_ARGS__)
+#define svdivr_u64_x(...) __builtin_sve_svdivr_u64_x(__VA_ARGS__)
+#define svdivr_u32_z(...) __builtin_sve_svdivr_u32_z(__VA_ARGS__)
+#define svdivr_u64_z(...) __builtin_sve_svdivr_u64_z(__VA_ARGS__)
+#define svdot_n_s32(...) __builtin_sve_svdot_n_s32(__VA_ARGS__)
+#define svdot_n_s64(...) __builtin_sve_svdot_n_s64(__VA_ARGS__)
+#define svdot_n_u32(...) __builtin_sve_svdot_n_u32(__VA_ARGS__)
+#define svdot_n_u64(...) __builtin_sve_svdot_n_u64(__VA_ARGS__)
+#define svdot_s32(...) __builtin_sve_svdot_s32(__VA_ARGS__)
+#define svdot_s64(...) __builtin_sve_svdot_s64(__VA_ARGS__)
+#define svdot_u32(...) __builtin_sve_svdot_u32(__VA_ARGS__)
+#define svdot_u64(...) __builtin_sve_svdot_u64(__VA_ARGS__)
+#define svdot_lane_s32(...) __builtin_sve_svdot_lane_s32(__VA_ARGS__)
+#define svdot_lane_s64(...) __builtin_sve_svdot_lane_s64(__VA_ARGS__)
+#define svdot_lane_u32(...) __builtin_sve_svdot_lane_u32(__VA_ARGS__)
+#define svdot_lane_u64(...) __builtin_sve_svdot_lane_u64(__VA_ARGS__)
+#define svdup_n_u8(...) __builtin_sve_svdup_n_u8(__VA_ARGS__)
+#define svdup_n_u32(...) __builtin_sve_svdup_n_u32(__VA_ARGS__)
+#define svdup_n_u64(...) __builtin_sve_svdup_n_u64(__VA_ARGS__)
+#define svdup_n_u16(...) __builtin_sve_svdup_n_u16(__VA_ARGS__)
+#define svdup_n_s8(...) __builtin_sve_svdup_n_s8(__VA_ARGS__)
+#define svdup_n_f64(...) __builtin_sve_svdup_n_f64(__VA_ARGS__)
+#define svdup_n_f32(...) __builtin_sve_svdup_n_f32(__VA_ARGS__)
+#define svdup_n_f16(...) __builtin_sve_svdup_n_f16(__VA_ARGS__)
+#define svdup_n_s32(...) __builtin_sve_svdup_n_s32(__VA_ARGS__)
+#define svdup_n_s64(...) __builtin_sve_svdup_n_s64(__VA_ARGS__)
+#define svdup_n_s16(...) __builtin_sve_svdup_n_s16(__VA_ARGS__)
+#define svdup_n_u8_m(...) __builtin_sve_svdup_n_u8_m(__VA_ARGS__)
+#define svdup_n_u32_m(...) __builtin_sve_svdup_n_u32_m(__VA_ARGS__)
+#define svdup_n_u64_m(...) __builtin_sve_svdup_n_u64_m(__VA_ARGS__)
+#define svdup_n_u16_m(...) __builtin_sve_svdup_n_u16_m(__VA_ARGS__)
+#define svdup_n_s8_m(...) __builtin_sve_svdup_n_s8_m(__VA_ARGS__)
+#define svdup_n_f64_m(...) __builtin_sve_svdup_n_f64_m(__VA_ARGS__)
+#define svdup_n_f32_m(...) __builtin_sve_svdup_n_f32_m(__VA_ARGS__)
+#define svdup_n_f16_m(...) __builtin_sve_svdup_n_f16_m(__VA_ARGS__)
+#define svdup_n_s32_m(...) __builtin_sve_svdup_n_s32_m(__VA_ARGS__)
+#define svdup_n_s64_m(...) __builtin_sve_svdup_n_s64_m(__VA_ARGS__)
+#define svdup_n_s16_m(...) __builtin_sve_svdup_n_s16_m(__VA_ARGS__)
+#define svdup_n_b8(...) __builtin_sve_svdup_n_b8(__VA_ARGS__)
+#define svdup_n_b32(...) __builtin_sve_svdup_n_b32(__VA_ARGS__)
+#define svdup_n_b64(...) __builtin_sve_svdup_n_b64(__VA_ARGS__)
+#define svdup_n_b16(...) __builtin_sve_svdup_n_b16(__VA_ARGS__)
+#define svdup_n_u8_x(...) __builtin_sve_svdup_n_u8_x(__VA_ARGS__)
+#define svdup_n_u32_x(...) __builtin_sve_svdup_n_u32_x(__VA_ARGS__)
+#define svdup_n_u64_x(...) __builtin_sve_svdup_n_u64_x(__VA_ARGS__)
+#define svdup_n_u16_x(...) __builtin_sve_svdup_n_u16_x(__VA_ARGS__)
+#define svdup_n_s8_x(...) __builtin_sve_svdup_n_s8_x(__VA_ARGS__)
+#define svdup_n_f64_x(...) __builtin_sve_svdup_n_f64_x(__VA_ARGS__)
+#define svdup_n_f32_x(...) __builtin_sve_svdup_n_f32_x(__VA_ARGS__)
+#define svdup_n_f16_x(...) __builtin_sve_svdup_n_f16_x(__VA_ARGS__)
+#define svdup_n_s32_x(...) __builtin_sve_svdup_n_s32_x(__VA_ARGS__)
+#define svdup_n_s64_x(...) __builtin_sve_svdup_n_s64_x(__VA_ARGS__)
+#define svdup_n_s16_x(...) __builtin_sve_svdup_n_s16_x(__VA_ARGS__)
+#define svdup_n_u8_z(...) __builtin_sve_svdup_n_u8_z(__VA_ARGS__)
+#define svdup_n_u32_z(...) __builtin_sve_svdup_n_u32_z(__VA_ARGS__)
+#define svdup_n_u64_z(...) __builtin_sve_svdup_n_u64_z(__VA_ARGS__)
+#define svdup_n_u16_z(...) __builtin_sve_svdup_n_u16_z(__VA_ARGS__)
+#define svdup_n_s8_z(...) __builtin_sve_svdup_n_s8_z(__VA_ARGS__)
+#define svdup_n_f64_z(...) __builtin_sve_svdup_n_f64_z(__VA_ARGS__)
+#define svdup_n_f32_z(...) __builtin_sve_svdup_n_f32_z(__VA_ARGS__)
+#define svdup_n_f16_z(...) __builtin_sve_svdup_n_f16_z(__VA_ARGS__)
+#define svdup_n_s32_z(...) __builtin_sve_svdup_n_s32_z(__VA_ARGS__)
+#define svdup_n_s64_z(...) __builtin_sve_svdup_n_s64_z(__VA_ARGS__)
+#define svdup_n_s16_z(...) __builtin_sve_svdup_n_s16_z(__VA_ARGS__)
+#define svdup_lane_u8(...) __builtin_sve_svdup_lane_u8(__VA_ARGS__)
+#define svdup_lane_u32(...) __builtin_sve_svdup_lane_u32(__VA_ARGS__)
+#define svdup_lane_u64(...) __builtin_sve_svdup_lane_u64(__VA_ARGS__)
+#define svdup_lane_u16(...) __builtin_sve_svdup_lane_u16(__VA_ARGS__)
+#define svdup_lane_s8(...) __builtin_sve_svdup_lane_s8(__VA_ARGS__)
+#define svdup_lane_f64(...) __builtin_sve_svdup_lane_f64(__VA_ARGS__)
+#define svdup_lane_f32(...) __builtin_sve_svdup_lane_f32(__VA_ARGS__)
+#define svdup_lane_f16(...) __builtin_sve_svdup_lane_f16(__VA_ARGS__)
+#define svdup_lane_s32(...) __builtin_sve_svdup_lane_s32(__VA_ARGS__)
+#define svdup_lane_s64(...) __builtin_sve_svdup_lane_s64(__VA_ARGS__)
+#define svdup_lane_s16(...) __builtin_sve_svdup_lane_s16(__VA_ARGS__)
+#define svdupq_n_u16(...) __builtin_sve_svdupq_n_u16(__VA_ARGS__)
+#define svdupq_n_f16(...) __builtin_sve_svdupq_n_f16(__VA_ARGS__)
+#define svdupq_n_s16(...) __builtin_sve_svdupq_n_s16(__VA_ARGS__)
+#define svdupq_n_u32(...) __builtin_sve_svdupq_n_u32(__VA_ARGS__)
+#define svdupq_n_f32(...) __builtin_sve_svdupq_n_f32(__VA_ARGS__)
+#define svdupq_n_s32(...) __builtin_sve_svdupq_n_s32(__VA_ARGS__)
+#define svdupq_n_u64(...) __builtin_sve_svdupq_n_u64(__VA_ARGS__)
+#define svdupq_n_f64(...) __builtin_sve_svdupq_n_f64(__VA_ARGS__)
+#define svdupq_n_s64(...) __builtin_sve_svdupq_n_s64(__VA_ARGS__)
+#define svdupq_n_u8(...) __builtin_sve_svdupq_n_u8(__VA_ARGS__)
+#define svdupq_n_s8(...) __builtin_sve_svdupq_n_s8(__VA_ARGS__)
+#define svdupq_n_b16(...) __builtin_sve_svdupq_n_b16(__VA_ARGS__)
+#define svdupq_n_b32(...) __builtin_sve_svdupq_n_b32(__VA_ARGS__)
+#define svdupq_n_b64(...) __builtin_sve_svdupq_n_b64(__VA_ARGS__)
+#define svdupq_n_b8(...) __builtin_sve_svdupq_n_b8(__VA_ARGS__)
+#define svdupq_lane_u8(...) __builtin_sve_svdupq_lane_u8(__VA_ARGS__)
+#define svdupq_lane_u32(...) __builtin_sve_svdupq_lane_u32(__VA_ARGS__)
+#define svdupq_lane_u64(...) __builtin_sve_svdupq_lane_u64(__VA_ARGS__)
+#define svdupq_lane_u16(...) __builtin_sve_svdupq_lane_u16(__VA_ARGS__)
+#define svdupq_lane_s8(...) __builtin_sve_svdupq_lane_s8(__VA_ARGS__)
+#define svdupq_lane_f64(...) __builtin_sve_svdupq_lane_f64(__VA_ARGS__)
+#define svdupq_lane_f32(...) __builtin_sve_svdupq_lane_f32(__VA_ARGS__)
+#define svdupq_lane_f16(...) __builtin_sve_svdupq_lane_f16(__VA_ARGS__)
+#define svdupq_lane_s32(...) __builtin_sve_svdupq_lane_s32(__VA_ARGS__)
+#define svdupq_lane_s64(...) __builtin_sve_svdupq_lane_s64(__VA_ARGS__)
+#define svdupq_lane_s16(...) __builtin_sve_svdupq_lane_s16(__VA_ARGS__)
+#define sveor_b_z(...) __builtin_sve_sveor_b_z(__VA_ARGS__)
+#define sveor_n_u8_m(...) __builtin_sve_sveor_n_u8_m(__VA_ARGS__)
+#define sveor_n_u32_m(...) __builtin_sve_sveor_n_u32_m(__VA_ARGS__)
+#define sveor_n_u64_m(...) __builtin_sve_sveor_n_u64_m(__VA_ARGS__)
+#define sveor_n_u16_m(...) __builtin_sve_sveor_n_u16_m(__VA_ARGS__)
+#define sveor_n_s8_m(...) __builtin_sve_sveor_n_s8_m(__VA_ARGS__)
+#define sveor_n_s32_m(...) __builtin_sve_sveor_n_s32_m(__VA_ARGS__)
+#define sveor_n_s64_m(...) __builtin_sve_sveor_n_s64_m(__VA_ARGS__)
+#define sveor_n_s16_m(...) __builtin_sve_sveor_n_s16_m(__VA_ARGS__)
+#define sveor_n_u8_x(...) __builtin_sve_sveor_n_u8_x(__VA_ARGS__)
+#define sveor_n_u32_x(...) __builtin_sve_sveor_n_u32_x(__VA_ARGS__)
+#define sveor_n_u64_x(...) __builtin_sve_sveor_n_u64_x(__VA_ARGS__)
+#define sveor_n_u16_x(...) __builtin_sve_sveor_n_u16_x(__VA_ARGS__)
+#define sveor_n_s8_x(...) __builtin_sve_sveor_n_s8_x(__VA_ARGS__)
+#define sveor_n_s32_x(...) __builtin_sve_sveor_n_s32_x(__VA_ARGS__)
+#define sveor_n_s64_x(...) __builtin_sve_sveor_n_s64_x(__VA_ARGS__)
+#define sveor_n_s16_x(...) __builtin_sve_sveor_n_s16_x(__VA_ARGS__)
+#define sveor_n_u8_z(...) __builtin_sve_sveor_n_u8_z(__VA_ARGS__)
+#define sveor_n_u32_z(...) __builtin_sve_sveor_n_u32_z(__VA_ARGS__)
+#define sveor_n_u64_z(...) __builtin_sve_sveor_n_u64_z(__VA_ARGS__)
+#define sveor_n_u16_z(...) __builtin_sve_sveor_n_u16_z(__VA_ARGS__)
+#define sveor_n_s8_z(...) __builtin_sve_sveor_n_s8_z(__VA_ARGS__)
+#define sveor_n_s32_z(...) __builtin_sve_sveor_n_s32_z(__VA_ARGS__)
+#define sveor_n_s64_z(...) __builtin_sve_sveor_n_s64_z(__VA_ARGS__)
+#define sveor_n_s16_z(...) __builtin_sve_sveor_n_s16_z(__VA_ARGS__)
+#define sveor_u8_m(...) __builtin_sve_sveor_u8_m(__VA_ARGS__)
+#define sveor_u32_m(...) __builtin_sve_sveor_u32_m(__VA_ARGS__)
+#define sveor_u64_m(...) __builtin_sve_sveor_u64_m(__VA_ARGS__)
+#define sveor_u16_m(...) __builtin_sve_sveor_u16_m(__VA_ARGS__)
+#define sveor_s8_m(...) __builtin_sve_sveor_s8_m(__VA_ARGS__)
+#define sveor_s32_m(...) __builtin_sve_sveor_s32_m(__VA_ARGS__)
+#define sveor_s64_m(...) __builtin_sve_sveor_s64_m(__VA_ARGS__)
+#define sveor_s16_m(...) __builtin_sve_sveor_s16_m(__VA_ARGS__)
+#define sveor_u8_x(...) __builtin_sve_sveor_u8_x(__VA_ARGS__)
+#define sveor_u32_x(...) __builtin_sve_sveor_u32_x(__VA_ARGS__)
+#define sveor_u64_x(...) __builtin_sve_sveor_u64_x(__VA_ARGS__)
+#define sveor_u16_x(...) __builtin_sve_sveor_u16_x(__VA_ARGS__)
+#define sveor_s8_x(...) __builtin_sve_sveor_s8_x(__VA_ARGS__)
+#define sveor_s32_x(...) __builtin_sve_sveor_s32_x(__VA_ARGS__)
+#define sveor_s64_x(...) __builtin_sve_sveor_s64_x(__VA_ARGS__)
+#define sveor_s16_x(...) __builtin_sve_sveor_s16_x(__VA_ARGS__)
+#define sveor_u8_z(...) __builtin_sve_sveor_u8_z(__VA_ARGS__)
+#define sveor_u32_z(...) __builtin_sve_sveor_u32_z(__VA_ARGS__)
+#define sveor_u64_z(...) __builtin_sve_sveor_u64_z(__VA_ARGS__)
+#define sveor_u16_z(...) __builtin_sve_sveor_u16_z(__VA_ARGS__)
+#define sveor_s8_z(...) __builtin_sve_sveor_s8_z(__VA_ARGS__)
+#define sveor_s32_z(...) __builtin_sve_sveor_s32_z(__VA_ARGS__)
+#define sveor_s64_z(...) __builtin_sve_sveor_s64_z(__VA_ARGS__)
+#define sveor_s16_z(...) __builtin_sve_sveor_s16_z(__VA_ARGS__)
+#define sveorv_u8(...) __builtin_sve_sveorv_u8(__VA_ARGS__)
+#define sveorv_u32(...) __builtin_sve_sveorv_u32(__VA_ARGS__)
+#define sveorv_u64(...) __builtin_sve_sveorv_u64(__VA_ARGS__)
+#define sveorv_u16(...) __builtin_sve_sveorv_u16(__VA_ARGS__)
+#define sveorv_s8(...) __builtin_sve_sveorv_s8(__VA_ARGS__)
+#define sveorv_s32(...) __builtin_sve_sveorv_s32(__VA_ARGS__)
+#define sveorv_s64(...) __builtin_sve_sveorv_s64(__VA_ARGS__)
+#define sveorv_s16(...) __builtin_sve_sveorv_s16(__VA_ARGS__)
+#define svexpa_f64(...) __builtin_sve_svexpa_f64(__VA_ARGS__)
+#define svexpa_f32(...) __builtin_sve_svexpa_f32(__VA_ARGS__)
+#define svexpa_f16(...) __builtin_sve_svexpa_f16(__VA_ARGS__)
+#define svext_u8(...) __builtin_sve_svext_u8(__VA_ARGS__)
+#define svext_u32(...) __builtin_sve_svext_u32(__VA_ARGS__)
+#define svext_u64(...) __builtin_sve_svext_u64(__VA_ARGS__)
+#define svext_u16(...) __builtin_sve_svext_u16(__VA_ARGS__)
+#define svext_s8(...) __builtin_sve_svext_s8(__VA_ARGS__)
+#define svext_f64(...) __builtin_sve_svext_f64(__VA_ARGS__)
+#define svext_f32(...) __builtin_sve_svext_f32(__VA_ARGS__)
+#define svext_f16(...) __builtin_sve_svext_f16(__VA_ARGS__)
+#define svext_s32(...) __builtin_sve_svext_s32(__VA_ARGS__)
+#define svext_s64(...) __builtin_sve_svext_s64(__VA_ARGS__)
+#define svext_s16(...) __builtin_sve_svext_s16(__VA_ARGS__)
+#define svextb_s32_m(...) __builtin_sve_svextb_s32_m(__VA_ARGS__)
+#define svextb_s64_m(...) __builtin_sve_svextb_s64_m(__VA_ARGS__)
+#define svextb_s16_m(...) __builtin_sve_svextb_s16_m(__VA_ARGS__)
+#define svextb_s32_x(...) __builtin_sve_svextb_s32_x(__VA_ARGS__)
+#define svextb_s64_x(...) __builtin_sve_svextb_s64_x(__VA_ARGS__)
+#define svextb_s16_x(...) __builtin_sve_svextb_s16_x(__VA_ARGS__)
+#define svextb_s32_z(...) __builtin_sve_svextb_s32_z(__VA_ARGS__)
+#define svextb_s64_z(...) __builtin_sve_svextb_s64_z(__VA_ARGS__)
+#define svextb_s16_z(...) __builtin_sve_svextb_s16_z(__VA_ARGS__)
+#define svextb_u32_m(...) __builtin_sve_svextb_u32_m(__VA_ARGS__)
+#define svextb_u64_m(...) __builtin_sve_svextb_u64_m(__VA_ARGS__)
+#define svextb_u16_m(...) __builtin_sve_svextb_u16_m(__VA_ARGS__)
+#define svextb_u32_x(...) __builtin_sve_svextb_u32_x(__VA_ARGS__)
+#define svextb_u64_x(...) __builtin_sve_svextb_u64_x(__VA_ARGS__)
+#define svextb_u16_x(...) __builtin_sve_svextb_u16_x(__VA_ARGS__)
+#define svextb_u32_z(...) __builtin_sve_svextb_u32_z(__VA_ARGS__)
+#define svextb_u64_z(...) __builtin_sve_svextb_u64_z(__VA_ARGS__)
+#define svextb_u16_z(...) __builtin_sve_svextb_u16_z(__VA_ARGS__)
+#define svexth_s32_m(...) __builtin_sve_svexth_s32_m(__VA_ARGS__)
+#define svexth_s64_m(...) __builtin_sve_svexth_s64_m(__VA_ARGS__)
+#define svexth_s32_x(...) __builtin_sve_svexth_s32_x(__VA_ARGS__)
+#define svexth_s64_x(...) __builtin_sve_svexth_s64_x(__VA_ARGS__)
+#define svexth_s32_z(...) __builtin_sve_svexth_s32_z(__VA_ARGS__)
+#define svexth_s64_z(...) __builtin_sve_svexth_s64_z(__VA_ARGS__)
+#define svexth_u32_m(...) __builtin_sve_svexth_u32_m(__VA_ARGS__)
+#define svexth_u64_m(...) __builtin_sve_svexth_u64_m(__VA_ARGS__)
+#define svexth_u32_x(...) __builtin_sve_svexth_u32_x(__VA_ARGS__)
+#define svexth_u64_x(...) __builtin_sve_svexth_u64_x(__VA_ARGS__)
+#define svexth_u32_z(...) __builtin_sve_svexth_u32_z(__VA_ARGS__)
+#define svexth_u64_z(...) __builtin_sve_svexth_u64_z(__VA_ARGS__)
+#define svextw_s64_m(...) __builtin_sve_svextw_s64_m(__VA_ARGS__)
+#define svextw_s64_x(...) __builtin_sve_svextw_s64_x(__VA_ARGS__)
+#define svextw_s64_z(...) __builtin_sve_svextw_s64_z(__VA_ARGS__)
+#define svextw_u64_m(...) __builtin_sve_svextw_u64_m(__VA_ARGS__)
+#define svextw_u64_x(...) __builtin_sve_svextw_u64_x(__VA_ARGS__)
+#define svextw_u64_z(...) __builtin_sve_svextw_u64_z(__VA_ARGS__)
+#define svget2_u8(...) __builtin_sve_svget2_u8(__VA_ARGS__)
+#define svget2_u32(...) __builtin_sve_svget2_u32(__VA_ARGS__)
+#define svget2_u64(...) __builtin_sve_svget2_u64(__VA_ARGS__)
+#define svget2_u16(...) __builtin_sve_svget2_u16(__VA_ARGS__)
+#define svget2_s8(...) __builtin_sve_svget2_s8(__VA_ARGS__)
+#define svget2_f64(...) __builtin_sve_svget2_f64(__VA_ARGS__)
+#define svget2_f32(...) __builtin_sve_svget2_f32(__VA_ARGS__)
+#define svget2_f16(...) __builtin_sve_svget2_f16(__VA_ARGS__)
+#define svget2_s32(...) __builtin_sve_svget2_s32(__VA_ARGS__)
+#define svget2_s64(...) __builtin_sve_svget2_s64(__VA_ARGS__)
+#define svget2_s16(...) __builtin_sve_svget2_s16(__VA_ARGS__)
+#define svget3_u8(...) __builtin_sve_svget3_u8(__VA_ARGS__)
+#define svget3_u32(...) __builtin_sve_svget3_u32(__VA_ARGS__)
+#define svget3_u64(...) __builtin_sve_svget3_u64(__VA_ARGS__)
+#define svget3_u16(...) __builtin_sve_svget3_u16(__VA_ARGS__)
+#define svget3_s8(...) __builtin_sve_svget3_s8(__VA_ARGS__)
+#define svget3_f64(...) __builtin_sve_svget3_f64(__VA_ARGS__)
+#define svget3_f32(...) __builtin_sve_svget3_f32(__VA_ARGS__)
+#define svget3_f16(...) __builtin_sve_svget3_f16(__VA_ARGS__)
+#define svget3_s32(...) __builtin_sve_svget3_s32(__VA_ARGS__)
+#define svget3_s64(...) __builtin_sve_svget3_s64(__VA_ARGS__)
+#define svget3_s16(...) __builtin_sve_svget3_s16(__VA_ARGS__)
+#define svget4_u8(...) __builtin_sve_svget4_u8(__VA_ARGS__)
+#define svget4_u32(...) __builtin_sve_svget4_u32(__VA_ARGS__)
+#define svget4_u64(...) __builtin_sve_svget4_u64(__VA_ARGS__)
+#define svget4_u16(...) __builtin_sve_svget4_u16(__VA_ARGS__)
+#define svget4_s8(...) __builtin_sve_svget4_s8(__VA_ARGS__)
+#define svget4_f64(...) __builtin_sve_svget4_f64(__VA_ARGS__)
+#define svget4_f32(...) __builtin_sve_svget4_f32(__VA_ARGS__)
+#define svget4_f16(...) __builtin_sve_svget4_f16(__VA_ARGS__)
+#define svget4_s32(...) __builtin_sve_svget4_s32(__VA_ARGS__)
+#define svget4_s64(...) __builtin_sve_svget4_s64(__VA_ARGS__)
+#define svget4_s16(...) __builtin_sve_svget4_s16(__VA_ARGS__)
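+// Note: svget2/svget3/svget4 extract one vector from a tuple value
+// (e.g. svint32x2_t) by a constant index. Illustrative sketch only;
+// `tuple` is a hypothetical value of type svint32x2_t:
+//
+//   svint32_t first = svget2_s32(tuple, 0);  // index must be a constant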
+#define svindex_u8(...) __builtin_sve_svindex_u8(__VA_ARGS__)
+#define svindex_u32(...) __builtin_sve_svindex_u32(__VA_ARGS__)
+#define svindex_u64(...) __builtin_sve_svindex_u64(__VA_ARGS__)
+#define svindex_u16(...) __builtin_sve_svindex_u16(__VA_ARGS__)
+#define svindex_s8(...) __builtin_sve_svindex_s8(__VA_ARGS__)
+#define svindex_s32(...) __builtin_sve_svindex_s32(__VA_ARGS__)
+#define svindex_s64(...) __builtin_sve_svindex_s64(__VA_ARGS__)
+#define svindex_s16(...) __builtin_sve_svindex_s16(__VA_ARGS__)
+#define svinsr_n_u8(...) __builtin_sve_svinsr_n_u8(__VA_ARGS__)
+#define svinsr_n_u32(...) __builtin_sve_svinsr_n_u32(__VA_ARGS__)
+#define svinsr_n_u64(...) __builtin_sve_svinsr_n_u64(__VA_ARGS__)
+#define svinsr_n_u16(...) __builtin_sve_svinsr_n_u16(__VA_ARGS__)
+#define svinsr_n_s8(...) __builtin_sve_svinsr_n_s8(__VA_ARGS__)
+#define svinsr_n_f64(...) __builtin_sve_svinsr_n_f64(__VA_ARGS__)
+#define svinsr_n_f32(...) __builtin_sve_svinsr_n_f32(__VA_ARGS__)
+#define svinsr_n_f16(...) __builtin_sve_svinsr_n_f16(__VA_ARGS__)
+#define svinsr_n_s32(...) __builtin_sve_svinsr_n_s32(__VA_ARGS__)
+#define svinsr_n_s64(...) __builtin_sve_svinsr_n_s64(__VA_ARGS__)
+#define svinsr_n_s16(...) __builtin_sve_svinsr_n_s16(__VA_ARGS__)
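+// Note: svindex builds the series base, base+step, base+2*step, ...;
+// svinsr shifts a vector up by one element and inserts a scalar at lane 0.
+// Hedged sketch (variable names are illustrative):
+//
+//   svint32_t iota  = svindex_s32(0, 1);       // 0, 1, 2, ...
+//   svint32_t shift = svinsr_n_s32(iota, -1);  // -1, 0, 1, 2, ...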
+#define svlasta_u8(...) __builtin_sve_svlasta_u8(__VA_ARGS__)
+#define svlasta_u32(...) __builtin_sve_svlasta_u32(__VA_ARGS__)
+#define svlasta_u64(...) __builtin_sve_svlasta_u64(__VA_ARGS__)
+#define svlasta_u16(...) __builtin_sve_svlasta_u16(__VA_ARGS__)
+#define svlasta_s8(...) __builtin_sve_svlasta_s8(__VA_ARGS__)
+#define svlasta_f64(...) __builtin_sve_svlasta_f64(__VA_ARGS__)
+#define svlasta_f32(...) __builtin_sve_svlasta_f32(__VA_ARGS__)
+#define svlasta_f16(...) __builtin_sve_svlasta_f16(__VA_ARGS__)
+#define svlasta_s32(...) __builtin_sve_svlasta_s32(__VA_ARGS__)
+#define svlasta_s64(...) __builtin_sve_svlasta_s64(__VA_ARGS__)
+#define svlasta_s16(...) __builtin_sve_svlasta_s16(__VA_ARGS__)
+#define svlastb_u8(...) __builtin_sve_svlastb_u8(__VA_ARGS__)
+#define svlastb_u32(...) __builtin_sve_svlastb_u32(__VA_ARGS__)
+#define svlastb_u64(...) __builtin_sve_svlastb_u64(__VA_ARGS__)
+#define svlastb_u16(...) __builtin_sve_svlastb_u16(__VA_ARGS__)
+#define svlastb_s8(...) __builtin_sve_svlastb_s8(__VA_ARGS__)
+#define svlastb_f64(...) __builtin_sve_svlastb_f64(__VA_ARGS__)
+#define svlastb_f32(...) __builtin_sve_svlastb_f32(__VA_ARGS__)
+#define svlastb_f16(...) __builtin_sve_svlastb_f16(__VA_ARGS__)
+#define svlastb_s32(...) __builtin_sve_svlastb_s32(__VA_ARGS__)
+#define svlastb_s64(...) __builtin_sve_svlastb_s64(__VA_ARGS__)
+#define svlastb_s16(...) __builtin_sve_svlastb_s16(__VA_ARGS__)
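+// Note: svlastb returns the last active element of a vector; svlasta
+// returns the element after it (wrapping to element 0). Sketch under the
+// assumption that `pg` has at least one active lane:
+//
+//   int32_t tail = svlastb_s32(pg, v);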
+#define svld1_u8(...) __builtin_sve_svld1_u8(__VA_ARGS__)
+#define svld1_u32(...) __builtin_sve_svld1_u32(__VA_ARGS__)
+#define svld1_u64(...) __builtin_sve_svld1_u64(__VA_ARGS__)
+#define svld1_u16(...) __builtin_sve_svld1_u16(__VA_ARGS__)
+#define svld1_s8(...) __builtin_sve_svld1_s8(__VA_ARGS__)
+#define svld1_f64(...) __builtin_sve_svld1_f64(__VA_ARGS__)
+#define svld1_f32(...) __builtin_sve_svld1_f32(__VA_ARGS__)
+#define svld1_f16(...) __builtin_sve_svld1_f16(__VA_ARGS__)
+#define svld1_s32(...) __builtin_sve_svld1_s32(__VA_ARGS__)
+#define svld1_s64(...) __builtin_sve_svld1_s64(__VA_ARGS__)
+#define svld1_s16(...) __builtin_sve_svld1_s16(__VA_ARGS__)
+#define svld1_gather_u32base_index_u32(...) __builtin_sve_svld1_gather_u32base_index_u32(__VA_ARGS__)
+#define svld1_gather_u64base_index_u64(...) __builtin_sve_svld1_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1_gather_u64base_index_f64(...) __builtin_sve_svld1_gather_u64base_index_f64(__VA_ARGS__)
+#define svld1_gather_u32base_index_f32(...) __builtin_sve_svld1_gather_u32base_index_f32(__VA_ARGS__)
+#define svld1_gather_u32base_index_s32(...) __builtin_sve_svld1_gather_u32base_index_s32(__VA_ARGS__)
+#define svld1_gather_u64base_index_s64(...) __builtin_sve_svld1_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1_gather_u32base_offset_u32(...) __builtin_sve_svld1_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1_gather_u64base_offset_u64(...) __builtin_sve_svld1_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1_gather_u64base_offset_f64(...) __builtin_sve_svld1_gather_u64base_offset_f64(__VA_ARGS__)
+#define svld1_gather_u32base_offset_f32(...) __builtin_sve_svld1_gather_u32base_offset_f32(__VA_ARGS__)
+#define svld1_gather_u32base_offset_s32(...) __builtin_sve_svld1_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1_gather_u64base_offset_s64(...) __builtin_sve_svld1_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1_gather_u32base_u32(...) __builtin_sve_svld1_gather_u32base_u32(__VA_ARGS__)
+#define svld1_gather_u64base_u64(...) __builtin_sve_svld1_gather_u64base_u64(__VA_ARGS__)
+#define svld1_gather_u64base_f64(...) __builtin_sve_svld1_gather_u64base_f64(__VA_ARGS__)
+#define svld1_gather_u32base_f32(...) __builtin_sve_svld1_gather_u32base_f32(__VA_ARGS__)
+#define svld1_gather_u32base_s32(...) __builtin_sve_svld1_gather_u32base_s32(__VA_ARGS__)
+#define svld1_gather_u64base_s64(...) __builtin_sve_svld1_gather_u64base_s64(__VA_ARGS__)
+#define svld1_gather_s32index_u32(...) __builtin_sve_svld1_gather_s32index_u32(__VA_ARGS__)
+#define svld1_gather_s32index_f32(...) __builtin_sve_svld1_gather_s32index_f32(__VA_ARGS__)
+#define svld1_gather_s32index_s32(...) __builtin_sve_svld1_gather_s32index_s32(__VA_ARGS__)
+#define svld1_gather_u32index_u32(...) __builtin_sve_svld1_gather_u32index_u32(__VA_ARGS__)
+#define svld1_gather_u32index_f32(...) __builtin_sve_svld1_gather_u32index_f32(__VA_ARGS__)
+#define svld1_gather_u32index_s32(...) __builtin_sve_svld1_gather_u32index_s32(__VA_ARGS__)
+#define svld1_gather_s64index_u64(...) __builtin_sve_svld1_gather_s64index_u64(__VA_ARGS__)
+#define svld1_gather_s64index_f64(...) __builtin_sve_svld1_gather_s64index_f64(__VA_ARGS__)
+#define svld1_gather_s64index_s64(...) __builtin_sve_svld1_gather_s64index_s64(__VA_ARGS__)
+#define svld1_gather_u64index_u64(...) __builtin_sve_svld1_gather_u64index_u64(__VA_ARGS__)
+#define svld1_gather_u64index_f64(...) __builtin_sve_svld1_gather_u64index_f64(__VA_ARGS__)
+#define svld1_gather_u64index_s64(...) __builtin_sve_svld1_gather_u64index_s64(__VA_ARGS__)
+#define svld1_gather_s32offset_u32(...) __builtin_sve_svld1_gather_s32offset_u32(__VA_ARGS__)
+#define svld1_gather_s32offset_f32(...) __builtin_sve_svld1_gather_s32offset_f32(__VA_ARGS__)
+#define svld1_gather_s32offset_s32(...) __builtin_sve_svld1_gather_s32offset_s32(__VA_ARGS__)
+#define svld1_gather_u32offset_u32(...) __builtin_sve_svld1_gather_u32offset_u32(__VA_ARGS__)
+#define svld1_gather_u32offset_f32(...) __builtin_sve_svld1_gather_u32offset_f32(__VA_ARGS__)
+#define svld1_gather_u32offset_s32(...) __builtin_sve_svld1_gather_u32offset_s32(__VA_ARGS__)
+#define svld1_gather_s64offset_u64(...) __builtin_sve_svld1_gather_s64offset_u64(__VA_ARGS__)
+#define svld1_gather_s64offset_f64(...) __builtin_sve_svld1_gather_s64offset_f64(__VA_ARGS__)
+#define svld1_gather_s64offset_s64(...) __builtin_sve_svld1_gather_s64offset_s64(__VA_ARGS__)
+#define svld1_gather_u64offset_u64(...) __builtin_sve_svld1_gather_u64offset_u64(__VA_ARGS__)
+#define svld1_gather_u64offset_f64(...) __builtin_sve_svld1_gather_u64offset_f64(__VA_ARGS__)
+#define svld1_gather_u64offset_s64(...) __builtin_sve_svld1_gather_u64offset_s64(__VA_ARGS__)
+#define svld1_vnum_u8(...) __builtin_sve_svld1_vnum_u8(__VA_ARGS__)
+#define svld1_vnum_u32(...) __builtin_sve_svld1_vnum_u32(__VA_ARGS__)
+#define svld1_vnum_u64(...) __builtin_sve_svld1_vnum_u64(__VA_ARGS__)
+#define svld1_vnum_u16(...) __builtin_sve_svld1_vnum_u16(__VA_ARGS__)
+#define svld1_vnum_s8(...) __builtin_sve_svld1_vnum_s8(__VA_ARGS__)
+#define svld1_vnum_f64(...) __builtin_sve_svld1_vnum_f64(__VA_ARGS__)
+#define svld1_vnum_f32(...) __builtin_sve_svld1_vnum_f32(__VA_ARGS__)
+#define svld1_vnum_f16(...) __builtin_sve_svld1_vnum_f16(__VA_ARGS__)
+#define svld1_vnum_s32(...) __builtin_sve_svld1_vnum_s32(__VA_ARGS__)
+#define svld1_vnum_s64(...) __builtin_sve_svld1_vnum_s64(__VA_ARGS__)
+#define svld1_vnum_s16(...) __builtin_sve_svld1_vnum_s16(__VA_ARGS__)
+#define svld1rq_u8(...) __builtin_sve_svld1rq_u8(__VA_ARGS__)
+#define svld1rq_u32(...) __builtin_sve_svld1rq_u32(__VA_ARGS__)
+#define svld1rq_u64(...) __builtin_sve_svld1rq_u64(__VA_ARGS__)
+#define svld1rq_u16(...) __builtin_sve_svld1rq_u16(__VA_ARGS__)
+#define svld1rq_s8(...) __builtin_sve_svld1rq_s8(__VA_ARGS__)
+#define svld1rq_f64(...) __builtin_sve_svld1rq_f64(__VA_ARGS__)
+#define svld1rq_f32(...) __builtin_sve_svld1rq_f32(__VA_ARGS__)
+#define svld1rq_f16(...) __builtin_sve_svld1rq_f16(__VA_ARGS__)
+#define svld1rq_s32(...) __builtin_sve_svld1rq_s32(__VA_ARGS__)
+#define svld1rq_s64(...) __builtin_sve_svld1rq_s64(__VA_ARGS__)
+#define svld1rq_s16(...) __builtin_sve_svld1rq_s16(__VA_ARGS__)
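+// Note: svld1 is the basic predicated contiguous load; the _gather_ forms
+// take a vector of bases and/or offsets (offset = bytes, index = elements),
+// the _vnum forms address in whole vector-lengths, and svld1rq loads one
+// 128-bit quadword and replicates it across the vector. Illustrative
+// sketch (`buf` and the wrapper are hypothetical):
+//
+//   svint32_t load_all(const int32_t *buf) {
+//     svbool_t pg = svptrue_b32();  // all lanes active
+//     return svld1_s32(pg, buf);
+//   }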
+#define svld1sb_gather_u32base_offset_u32(...) __builtin_sve_svld1sb_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1sb_gather_u64base_offset_u64(...) __builtin_sve_svld1sb_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1sb_gather_u32base_offset_s32(...) __builtin_sve_svld1sb_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1sb_gather_u64base_offset_s64(...) __builtin_sve_svld1sb_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1sb_gather_u32base_u32(...) __builtin_sve_svld1sb_gather_u32base_u32(__VA_ARGS__)
+#define svld1sb_gather_u64base_u64(...) __builtin_sve_svld1sb_gather_u64base_u64(__VA_ARGS__)
+#define svld1sb_gather_u32base_s32(...) __builtin_sve_svld1sb_gather_u32base_s32(__VA_ARGS__)
+#define svld1sb_gather_u64base_s64(...) __builtin_sve_svld1sb_gather_u64base_s64(__VA_ARGS__)
+#define svld1sb_gather_s32offset_u32(...) __builtin_sve_svld1sb_gather_s32offset_u32(__VA_ARGS__)
+#define svld1sb_gather_s32offset_s32(...) __builtin_sve_svld1sb_gather_s32offset_s32(__VA_ARGS__)
+#define svld1sb_gather_u32offset_u32(...) __builtin_sve_svld1sb_gather_u32offset_u32(__VA_ARGS__)
+#define svld1sb_gather_u32offset_s32(...) __builtin_sve_svld1sb_gather_u32offset_s32(__VA_ARGS__)
+#define svld1sb_gather_s64offset_u64(...) __builtin_sve_svld1sb_gather_s64offset_u64(__VA_ARGS__)
+#define svld1sb_gather_s64offset_s64(...) __builtin_sve_svld1sb_gather_s64offset_s64(__VA_ARGS__)
+#define svld1sb_gather_u64offset_u64(...) __builtin_sve_svld1sb_gather_u64offset_u64(__VA_ARGS__)
+#define svld1sb_gather_u64offset_s64(...) __builtin_sve_svld1sb_gather_u64offset_s64(__VA_ARGS__)
+#define svld1sb_vnum_u32(...) __builtin_sve_svld1sb_vnum_u32(__VA_ARGS__)
+#define svld1sb_vnum_u64(...) __builtin_sve_svld1sb_vnum_u64(__VA_ARGS__)
+#define svld1sb_vnum_u16(...) __builtin_sve_svld1sb_vnum_u16(__VA_ARGS__)
+#define svld1sb_vnum_s32(...) __builtin_sve_svld1sb_vnum_s32(__VA_ARGS__)
+#define svld1sb_vnum_s64(...) __builtin_sve_svld1sb_vnum_s64(__VA_ARGS__)
+#define svld1sb_vnum_s16(...) __builtin_sve_svld1sb_vnum_s16(__VA_ARGS__)
+#define svld1sb_u32(...) __builtin_sve_svld1sb_u32(__VA_ARGS__)
+#define svld1sb_u64(...) __builtin_sve_svld1sb_u64(__VA_ARGS__)
+#define svld1sb_u16(...) __builtin_sve_svld1sb_u16(__VA_ARGS__)
+#define svld1sb_s32(...) __builtin_sve_svld1sb_s32(__VA_ARGS__)
+#define svld1sb_s64(...) __builtin_sve_svld1sb_s64(__VA_ARGS__)
+#define svld1sb_s16(...) __builtin_sve_svld1sb_s16(__VA_ARGS__)
+#define svld1sh_gather_u32base_index_u32(...) __builtin_sve_svld1sh_gather_u32base_index_u32(__VA_ARGS__)
+#define svld1sh_gather_u64base_index_u64(...) __builtin_sve_svld1sh_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1sh_gather_u32base_index_s32(...) __builtin_sve_svld1sh_gather_u32base_index_s32(__VA_ARGS__)
+#define svld1sh_gather_u64base_index_s64(...) __builtin_sve_svld1sh_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1sh_gather_u32base_offset_u32(...) __builtin_sve_svld1sh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1sh_gather_u64base_offset_u64(...) __builtin_sve_svld1sh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1sh_gather_u32base_offset_s32(...) __builtin_sve_svld1sh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1sh_gather_u64base_offset_s64(...) __builtin_sve_svld1sh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1sh_gather_u32base_u32(...) __builtin_sve_svld1sh_gather_u32base_u32(__VA_ARGS__)
+#define svld1sh_gather_u64base_u64(...) __builtin_sve_svld1sh_gather_u64base_u64(__VA_ARGS__)
+#define svld1sh_gather_u32base_s32(...) __builtin_sve_svld1sh_gather_u32base_s32(__VA_ARGS__)
+#define svld1sh_gather_u64base_s64(...) __builtin_sve_svld1sh_gather_u64base_s64(__VA_ARGS__)
+#define svld1sh_gather_s32index_u32(...) __builtin_sve_svld1sh_gather_s32index_u32(__VA_ARGS__)
+#define svld1sh_gather_s32index_s32(...) __builtin_sve_svld1sh_gather_s32index_s32(__VA_ARGS__)
+#define svld1sh_gather_u32index_u32(...) __builtin_sve_svld1sh_gather_u32index_u32(__VA_ARGS__)
+#define svld1sh_gather_u32index_s32(...) __builtin_sve_svld1sh_gather_u32index_s32(__VA_ARGS__)
+#define svld1sh_gather_s64index_u64(...) __builtin_sve_svld1sh_gather_s64index_u64(__VA_ARGS__)
+#define svld1sh_gather_s64index_s64(...) __builtin_sve_svld1sh_gather_s64index_s64(__VA_ARGS__)
+#define svld1sh_gather_u64index_u64(...) __builtin_sve_svld1sh_gather_u64index_u64(__VA_ARGS__)
+#define svld1sh_gather_u64index_s64(...) __builtin_sve_svld1sh_gather_u64index_s64(__VA_ARGS__)
+#define svld1sh_gather_s32offset_u32(...) __builtin_sve_svld1sh_gather_s32offset_u32(__VA_ARGS__)
+#define svld1sh_gather_s32offset_s32(...) __builtin_sve_svld1sh_gather_s32offset_s32(__VA_ARGS__)
+#define svld1sh_gather_u32offset_u32(...) __builtin_sve_svld1sh_gather_u32offset_u32(__VA_ARGS__)
+#define svld1sh_gather_u32offset_s32(...) __builtin_sve_svld1sh_gather_u32offset_s32(__VA_ARGS__)
+#define svld1sh_gather_s64offset_u64(...) __builtin_sve_svld1sh_gather_s64offset_u64(__VA_ARGS__)
+#define svld1sh_gather_s64offset_s64(...) __builtin_sve_svld1sh_gather_s64offset_s64(__VA_ARGS__)
+#define svld1sh_gather_u64offset_u64(...) __builtin_sve_svld1sh_gather_u64offset_u64(__VA_ARGS__)
+#define svld1sh_gather_u64offset_s64(...) __builtin_sve_svld1sh_gather_u64offset_s64(__VA_ARGS__)
+#define svld1sh_vnum_u32(...) __builtin_sve_svld1sh_vnum_u32(__VA_ARGS__)
+#define svld1sh_vnum_u64(...) __builtin_sve_svld1sh_vnum_u64(__VA_ARGS__)
+#define svld1sh_vnum_s32(...) __builtin_sve_svld1sh_vnum_s32(__VA_ARGS__)
+#define svld1sh_vnum_s64(...) __builtin_sve_svld1sh_vnum_s64(__VA_ARGS__)
+#define svld1sh_u32(...) __builtin_sve_svld1sh_u32(__VA_ARGS__)
+#define svld1sh_u64(...) __builtin_sve_svld1sh_u64(__VA_ARGS__)
+#define svld1sh_s32(...) __builtin_sve_svld1sh_s32(__VA_ARGS__)
+#define svld1sh_s64(...) __builtin_sve_svld1sh_s64(__VA_ARGS__)
+#define svld1sw_gather_u64base_index_u64(...) __builtin_sve_svld1sw_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1sw_gather_u64base_index_s64(...) __builtin_sve_svld1sw_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1sw_gather_u64base_offset_u64(...) __builtin_sve_svld1sw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1sw_gather_u64base_offset_s64(...) __builtin_sve_svld1sw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1sw_gather_u64base_u64(...) __builtin_sve_svld1sw_gather_u64base_u64(__VA_ARGS__)
+#define svld1sw_gather_u64base_s64(...) __builtin_sve_svld1sw_gather_u64base_s64(__VA_ARGS__)
+#define svld1sw_gather_s64index_u64(...) __builtin_sve_svld1sw_gather_s64index_u64(__VA_ARGS__)
+#define svld1sw_gather_s64index_s64(...) __builtin_sve_svld1sw_gather_s64index_s64(__VA_ARGS__)
+#define svld1sw_gather_u64index_u64(...) __builtin_sve_svld1sw_gather_u64index_u64(__VA_ARGS__)
+#define svld1sw_gather_u64index_s64(...) __builtin_sve_svld1sw_gather_u64index_s64(__VA_ARGS__)
+#define svld1sw_gather_s64offset_u64(...) __builtin_sve_svld1sw_gather_s64offset_u64(__VA_ARGS__)
+#define svld1sw_gather_s64offset_s64(...) __builtin_sve_svld1sw_gather_s64offset_s64(__VA_ARGS__)
+#define svld1sw_gather_u64offset_u64(...) __builtin_sve_svld1sw_gather_u64offset_u64(__VA_ARGS__)
+#define svld1sw_gather_u64offset_s64(...) __builtin_sve_svld1sw_gather_u64offset_s64(__VA_ARGS__)
+#define svld1sw_vnum_u64(...) __builtin_sve_svld1sw_vnum_u64(__VA_ARGS__)
+#define svld1sw_vnum_s64(...) __builtin_sve_svld1sw_vnum_s64(__VA_ARGS__)
+#define svld1sw_u64(...) __builtin_sve_svld1sw_u64(__VA_ARGS__)
+#define svld1sw_s64(...) __builtin_sve_svld1sw_s64(__VA_ARGS__)
+#define svld1ub_gather_u32base_offset_u32(...) __builtin_sve_svld1ub_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1ub_gather_u64base_offset_u64(...) __builtin_sve_svld1ub_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1ub_gather_u32base_offset_s32(...) __builtin_sve_svld1ub_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1ub_gather_u64base_offset_s64(...) __builtin_sve_svld1ub_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1ub_gather_u32base_u32(...) __builtin_sve_svld1ub_gather_u32base_u32(__VA_ARGS__)
+#define svld1ub_gather_u64base_u64(...) __builtin_sve_svld1ub_gather_u64base_u64(__VA_ARGS__)
+#define svld1ub_gather_u32base_s32(...) __builtin_sve_svld1ub_gather_u32base_s32(__VA_ARGS__)
+#define svld1ub_gather_u64base_s64(...) __builtin_sve_svld1ub_gather_u64base_s64(__VA_ARGS__)
+#define svld1ub_gather_s32offset_u32(...) __builtin_sve_svld1ub_gather_s32offset_u32(__VA_ARGS__)
+#define svld1ub_gather_s32offset_s32(...) __builtin_sve_svld1ub_gather_s32offset_s32(__VA_ARGS__)
+#define svld1ub_gather_u32offset_u32(...) __builtin_sve_svld1ub_gather_u32offset_u32(__VA_ARGS__)
+#define svld1ub_gather_u32offset_s32(...) __builtin_sve_svld1ub_gather_u32offset_s32(__VA_ARGS__)
+#define svld1ub_gather_s64offset_u64(...) __builtin_sve_svld1ub_gather_s64offset_u64(__VA_ARGS__)
+#define svld1ub_gather_s64offset_s64(...) __builtin_sve_svld1ub_gather_s64offset_s64(__VA_ARGS__)
+#define svld1ub_gather_u64offset_u64(...) __builtin_sve_svld1ub_gather_u64offset_u64(__VA_ARGS__)
+#define svld1ub_gather_u64offset_s64(...) __builtin_sve_svld1ub_gather_u64offset_s64(__VA_ARGS__)
+#define svld1ub_vnum_u32(...) __builtin_sve_svld1ub_vnum_u32(__VA_ARGS__)
+#define svld1ub_vnum_u64(...) __builtin_sve_svld1ub_vnum_u64(__VA_ARGS__)
+#define svld1ub_vnum_u16(...) __builtin_sve_svld1ub_vnum_u16(__VA_ARGS__)
+#define svld1ub_vnum_s32(...) __builtin_sve_svld1ub_vnum_s32(__VA_ARGS__)
+#define svld1ub_vnum_s64(...) __builtin_sve_svld1ub_vnum_s64(__VA_ARGS__)
+#define svld1ub_vnum_s16(...) __builtin_sve_svld1ub_vnum_s16(__VA_ARGS__)
+#define svld1ub_u32(...) __builtin_sve_svld1ub_u32(__VA_ARGS__)
+#define svld1ub_u64(...) __builtin_sve_svld1ub_u64(__VA_ARGS__)
+#define svld1ub_u16(...) __builtin_sve_svld1ub_u16(__VA_ARGS__)
+#define svld1ub_s32(...) __builtin_sve_svld1ub_s32(__VA_ARGS__)
+#define svld1ub_s64(...) __builtin_sve_svld1ub_s64(__VA_ARGS__)
+#define svld1ub_s16(...) __builtin_sve_svld1ub_s16(__VA_ARGS__)
+#define svld1uh_gather_u32base_index_u32(...) __builtin_sve_svld1uh_gather_u32base_index_u32(__VA_ARGS__)
+#define svld1uh_gather_u64base_index_u64(...) __builtin_sve_svld1uh_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1uh_gather_u32base_index_s32(...) __builtin_sve_svld1uh_gather_u32base_index_s32(__VA_ARGS__)
+#define svld1uh_gather_u64base_index_s64(...) __builtin_sve_svld1uh_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1uh_gather_u32base_offset_u32(...) __builtin_sve_svld1uh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1uh_gather_u64base_offset_u64(...) __builtin_sve_svld1uh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1uh_gather_u32base_offset_s32(...) __builtin_sve_svld1uh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1uh_gather_u64base_offset_s64(...) __builtin_sve_svld1uh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1uh_gather_u32base_u32(...) __builtin_sve_svld1uh_gather_u32base_u32(__VA_ARGS__)
+#define svld1uh_gather_u64base_u64(...) __builtin_sve_svld1uh_gather_u64base_u64(__VA_ARGS__)
+#define svld1uh_gather_u32base_s32(...) __builtin_sve_svld1uh_gather_u32base_s32(__VA_ARGS__)
+#define svld1uh_gather_u64base_s64(...) __builtin_sve_svld1uh_gather_u64base_s64(__VA_ARGS__)
+#define svld1uh_gather_s32index_u32(...) __builtin_sve_svld1uh_gather_s32index_u32(__VA_ARGS__)
+#define svld1uh_gather_s32index_s32(...) __builtin_sve_svld1uh_gather_s32index_s32(__VA_ARGS__)
+#define svld1uh_gather_u32index_u32(...) __builtin_sve_svld1uh_gather_u32index_u32(__VA_ARGS__)
+#define svld1uh_gather_u32index_s32(...) __builtin_sve_svld1uh_gather_u32index_s32(__VA_ARGS__)
+#define svld1uh_gather_s64index_u64(...) __builtin_sve_svld1uh_gather_s64index_u64(__VA_ARGS__)
+#define svld1uh_gather_s64index_s64(...) __builtin_sve_svld1uh_gather_s64index_s64(__VA_ARGS__)
+#define svld1uh_gather_u64index_u64(...) __builtin_sve_svld1uh_gather_u64index_u64(__VA_ARGS__)
+#define svld1uh_gather_u64index_s64(...) __builtin_sve_svld1uh_gather_u64index_s64(__VA_ARGS__)
+#define svld1uh_gather_s32offset_u32(...) __builtin_sve_svld1uh_gather_s32offset_u32(__VA_ARGS__)
+#define svld1uh_gather_s32offset_s32(...) __builtin_sve_svld1uh_gather_s32offset_s32(__VA_ARGS__)
+#define svld1uh_gather_u32offset_u32(...) __builtin_sve_svld1uh_gather_u32offset_u32(__VA_ARGS__)
+#define svld1uh_gather_u32offset_s32(...) __builtin_sve_svld1uh_gather_u32offset_s32(__VA_ARGS__)
+#define svld1uh_gather_s64offset_u64(...) __builtin_sve_svld1uh_gather_s64offset_u64(__VA_ARGS__)
+#define svld1uh_gather_s64offset_s64(...) __builtin_sve_svld1uh_gather_s64offset_s64(__VA_ARGS__)
+#define svld1uh_gather_u64offset_u64(...) __builtin_sve_svld1uh_gather_u64offset_u64(__VA_ARGS__)
+#define svld1uh_gather_u64offset_s64(...) __builtin_sve_svld1uh_gather_u64offset_s64(__VA_ARGS__)
+#define svld1uh_vnum_u32(...) __builtin_sve_svld1uh_vnum_u32(__VA_ARGS__)
+#define svld1uh_vnum_u64(...) __builtin_sve_svld1uh_vnum_u64(__VA_ARGS__)
+#define svld1uh_vnum_s32(...) __builtin_sve_svld1uh_vnum_s32(__VA_ARGS__)
+#define svld1uh_vnum_s64(...) __builtin_sve_svld1uh_vnum_s64(__VA_ARGS__)
+#define svld1uh_u32(...) __builtin_sve_svld1uh_u32(__VA_ARGS__)
+#define svld1uh_u64(...) __builtin_sve_svld1uh_u64(__VA_ARGS__)
+#define svld1uh_s32(...) __builtin_sve_svld1uh_s32(__VA_ARGS__)
+#define svld1uh_s64(...) __builtin_sve_svld1uh_s64(__VA_ARGS__)
+#define svld1uw_gather_u64base_index_u64(...) __builtin_sve_svld1uw_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1uw_gather_u64base_index_s64(...) __builtin_sve_svld1uw_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1uw_gather_u64base_offset_u64(...) __builtin_sve_svld1uw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1uw_gather_u64base_offset_s64(...) __builtin_sve_svld1uw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1uw_gather_u64base_u64(...) __builtin_sve_svld1uw_gather_u64base_u64(__VA_ARGS__)
+#define svld1uw_gather_u64base_s64(...) __builtin_sve_svld1uw_gather_u64base_s64(__VA_ARGS__)
+#define svld1uw_gather_s64index_u64(...) __builtin_sve_svld1uw_gather_s64index_u64(__VA_ARGS__)
+#define svld1uw_gather_s64index_s64(...) __builtin_sve_svld1uw_gather_s64index_s64(__VA_ARGS__)
+#define svld1uw_gather_u64index_u64(...) __builtin_sve_svld1uw_gather_u64index_u64(__VA_ARGS__)
+#define svld1uw_gather_u64index_s64(...) __builtin_sve_svld1uw_gather_u64index_s64(__VA_ARGS__)
+#define svld1uw_gather_s64offset_u64(...) __builtin_sve_svld1uw_gather_s64offset_u64(__VA_ARGS__)
+#define svld1uw_gather_s64offset_s64(...) __builtin_sve_svld1uw_gather_s64offset_s64(__VA_ARGS__)
+#define svld1uw_gather_u64offset_u64(...) __builtin_sve_svld1uw_gather_u64offset_u64(__VA_ARGS__)
+#define svld1uw_gather_u64offset_s64(...) __builtin_sve_svld1uw_gather_u64offset_s64(__VA_ARGS__)
+#define svld1uw_vnum_u64(...) __builtin_sve_svld1uw_vnum_u64(__VA_ARGS__)
+#define svld1uw_vnum_s64(...) __builtin_sve_svld1uw_vnum_s64(__VA_ARGS__)
+#define svld1uw_u64(...) __builtin_sve_svld1uw_u64(__VA_ARGS__)
+#define svld1uw_s64(...) __builtin_sve_svld1uw_s64(__VA_ARGS__)
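+// Note: the svld1sb/sh/sw and svld1ub/uh/uw families load narrow memory
+// elements and sign- (s) or zero- (u) extend them to the wider result
+// lanes, e.g. bytes widened to 32-bit lanes:
+//
+//   svint32_t v = svld1sb_s32(pg, bytes);  // const int8_t *bytes (hypothetical)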
+#define svld2_u8(...) __builtin_sve_svld2_u8(__VA_ARGS__)
+#define svld2_u32(...) __builtin_sve_svld2_u32(__VA_ARGS__)
+#define svld2_u64(...) __builtin_sve_svld2_u64(__VA_ARGS__)
+#define svld2_u16(...) __builtin_sve_svld2_u16(__VA_ARGS__)
+#define svld2_s8(...) __builtin_sve_svld2_s8(__VA_ARGS__)
+#define svld2_f64(...) __builtin_sve_svld2_f64(__VA_ARGS__)
+#define svld2_f32(...) __builtin_sve_svld2_f32(__VA_ARGS__)
+#define svld2_f16(...) __builtin_sve_svld2_f16(__VA_ARGS__)
+#define svld2_s32(...) __builtin_sve_svld2_s32(__VA_ARGS__)
+#define svld2_s64(...) __builtin_sve_svld2_s64(__VA_ARGS__)
+#define svld2_s16(...) __builtin_sve_svld2_s16(__VA_ARGS__)
+#define svld2_vnum_u8(...) __builtin_sve_svld2_vnum_u8(__VA_ARGS__)
+#define svld2_vnum_u32(...) __builtin_sve_svld2_vnum_u32(__VA_ARGS__)
+#define svld2_vnum_u64(...) __builtin_sve_svld2_vnum_u64(__VA_ARGS__)
+#define svld2_vnum_u16(...) __builtin_sve_svld2_vnum_u16(__VA_ARGS__)
+#define svld2_vnum_s8(...) __builtin_sve_svld2_vnum_s8(__VA_ARGS__)
+#define svld2_vnum_f64(...) __builtin_sve_svld2_vnum_f64(__VA_ARGS__)
+#define svld2_vnum_f32(...) __builtin_sve_svld2_vnum_f32(__VA_ARGS__)
+#define svld2_vnum_f16(...) __builtin_sve_svld2_vnum_f16(__VA_ARGS__)
+#define svld2_vnum_s32(...) __builtin_sve_svld2_vnum_s32(__VA_ARGS__)
+#define svld2_vnum_s64(...) __builtin_sve_svld2_vnum_s64(__VA_ARGS__)
+#define svld2_vnum_s16(...) __builtin_sve_svld2_vnum_s16(__VA_ARGS__)
+#define svld3_u8(...) __builtin_sve_svld3_u8(__VA_ARGS__)
+#define svld3_u32(...) __builtin_sve_svld3_u32(__VA_ARGS__)
+#define svld3_u64(...) __builtin_sve_svld3_u64(__VA_ARGS__)
+#define svld3_u16(...) __builtin_sve_svld3_u16(__VA_ARGS__)
+#define svld3_s8(...) __builtin_sve_svld3_s8(__VA_ARGS__)
+#define svld3_f64(...) __builtin_sve_svld3_f64(__VA_ARGS__)
+#define svld3_f32(...) __builtin_sve_svld3_f32(__VA_ARGS__)
+#define svld3_f16(...) __builtin_sve_svld3_f16(__VA_ARGS__)
+#define svld3_s32(...) __builtin_sve_svld3_s32(__VA_ARGS__)
+#define svld3_s64(...) __builtin_sve_svld3_s64(__VA_ARGS__)
+#define svld3_s16(...) __builtin_sve_svld3_s16(__VA_ARGS__)
+#define svld3_vnum_u8(...) __builtin_sve_svld3_vnum_u8(__VA_ARGS__)
+#define svld3_vnum_u32(...) __builtin_sve_svld3_vnum_u32(__VA_ARGS__)
+#define svld3_vnum_u64(...) __builtin_sve_svld3_vnum_u64(__VA_ARGS__)
+#define svld3_vnum_u16(...) __builtin_sve_svld3_vnum_u16(__VA_ARGS__)
+#define svld3_vnum_s8(...) __builtin_sve_svld3_vnum_s8(__VA_ARGS__)
+#define svld3_vnum_f64(...) __builtin_sve_svld3_vnum_f64(__VA_ARGS__)
+#define svld3_vnum_f32(...) __builtin_sve_svld3_vnum_f32(__VA_ARGS__)
+#define svld3_vnum_f16(...) __builtin_sve_svld3_vnum_f16(__VA_ARGS__)
+#define svld3_vnum_s32(...) __builtin_sve_svld3_vnum_s32(__VA_ARGS__)
+#define svld3_vnum_s64(...) __builtin_sve_svld3_vnum_s64(__VA_ARGS__)
+#define svld3_vnum_s16(...) __builtin_sve_svld3_vnum_s16(__VA_ARGS__)
+#define svld4_u8(...) __builtin_sve_svld4_u8(__VA_ARGS__)
+#define svld4_u32(...) __builtin_sve_svld4_u32(__VA_ARGS__)
+#define svld4_u64(...) __builtin_sve_svld4_u64(__VA_ARGS__)
+#define svld4_u16(...) __builtin_sve_svld4_u16(__VA_ARGS__)
+#define svld4_s8(...) __builtin_sve_svld4_s8(__VA_ARGS__)
+#define svld4_f64(...) __builtin_sve_svld4_f64(__VA_ARGS__)
+#define svld4_f32(...) __builtin_sve_svld4_f32(__VA_ARGS__)
+#define svld4_f16(...) __builtin_sve_svld4_f16(__VA_ARGS__)
+#define svld4_s32(...) __builtin_sve_svld4_s32(__VA_ARGS__)
+#define svld4_s64(...) __builtin_sve_svld4_s64(__VA_ARGS__)
+#define svld4_s16(...) __builtin_sve_svld4_s16(__VA_ARGS__)
+#define svld4_vnum_u8(...) __builtin_sve_svld4_vnum_u8(__VA_ARGS__)
+#define svld4_vnum_u32(...) __builtin_sve_svld4_vnum_u32(__VA_ARGS__)
+#define svld4_vnum_u64(...) __builtin_sve_svld4_vnum_u64(__VA_ARGS__)
+#define svld4_vnum_u16(...) __builtin_sve_svld4_vnum_u16(__VA_ARGS__)
+#define svld4_vnum_s8(...) __builtin_sve_svld4_vnum_s8(__VA_ARGS__)
+#define svld4_vnum_f64(...) __builtin_sve_svld4_vnum_f64(__VA_ARGS__)
+#define svld4_vnum_f32(...) __builtin_sve_svld4_vnum_f32(__VA_ARGS__)
+#define svld4_vnum_f16(...) __builtin_sve_svld4_vnum_f16(__VA_ARGS__)
+#define svld4_vnum_s32(...) __builtin_sve_svld4_vnum_s32(__VA_ARGS__)
+#define svld4_vnum_s64(...) __builtin_sve_svld4_vnum_s64(__VA_ARGS__)
+#define svld4_vnum_s16(...) __builtin_sve_svld4_vnum_s16(__VA_ARGS__)
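+// Note: svld2/svld3/svld4 perform de-interleaving structure loads and
+// return tuple types (svfloat32x2_t etc.). Sketch, assuming `xy` points
+// at interleaved x,y pairs:
+//
+//   svfloat32x2_t pair = svld2_f32(pg, xy);
+//   svfloat32_t xs = svget2_f32(pair, 0);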
+#define svldff1_u8(...) __builtin_sve_svldff1_u8(__VA_ARGS__)
+#define svldff1_u32(...) __builtin_sve_svldff1_u32(__VA_ARGS__)
+#define svldff1_u64(...) __builtin_sve_svldff1_u64(__VA_ARGS__)
+#define svldff1_u16(...) __builtin_sve_svldff1_u16(__VA_ARGS__)
+#define svldff1_s8(...) __builtin_sve_svldff1_s8(__VA_ARGS__)
+#define svldff1_f64(...) __builtin_sve_svldff1_f64(__VA_ARGS__)
+#define svldff1_f32(...) __builtin_sve_svldff1_f32(__VA_ARGS__)
+#define svldff1_f16(...) __builtin_sve_svldff1_f16(__VA_ARGS__)
+#define svldff1_s32(...) __builtin_sve_svldff1_s32(__VA_ARGS__)
+#define svldff1_s64(...) __builtin_sve_svldff1_s64(__VA_ARGS__)
+#define svldff1_s16(...) __builtin_sve_svldff1_s16(__VA_ARGS__)
+#define svldff1_gather_u32base_index_u32(...) __builtin_sve_svldff1_gather_u32base_index_u32(__VA_ARGS__)
+#define svldff1_gather_u64base_index_u64(...) __builtin_sve_svldff1_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1_gather_u64base_index_f64(...) __builtin_sve_svldff1_gather_u64base_index_f64(__VA_ARGS__)
+#define svldff1_gather_u32base_index_f32(...) __builtin_sve_svldff1_gather_u32base_index_f32(__VA_ARGS__)
+#define svldff1_gather_u32base_index_s32(...) __builtin_sve_svldff1_gather_u32base_index_s32(__VA_ARGS__)
+#define svldff1_gather_u64base_index_s64(...) __builtin_sve_svldff1_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1_gather_u32base_offset_u32(...) __builtin_sve_svldff1_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1_gather_u64base_offset_u64(...) __builtin_sve_svldff1_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1_gather_u64base_offset_f64(...) __builtin_sve_svldff1_gather_u64base_offset_f64(__VA_ARGS__)
+#define svldff1_gather_u32base_offset_f32(...) __builtin_sve_svldff1_gather_u32base_offset_f32(__VA_ARGS__)
+#define svldff1_gather_u32base_offset_s32(...) __builtin_sve_svldff1_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1_gather_u64base_offset_s64(...) __builtin_sve_svldff1_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1_gather_u32base_u32(...) __builtin_sve_svldff1_gather_u32base_u32(__VA_ARGS__)
+#define svldff1_gather_u64base_u64(...) __builtin_sve_svldff1_gather_u64base_u64(__VA_ARGS__)
+#define svldff1_gather_u64base_f64(...) __builtin_sve_svldff1_gather_u64base_f64(__VA_ARGS__)
+#define svldff1_gather_u32base_f32(...) __builtin_sve_svldff1_gather_u32base_f32(__VA_ARGS__)
+#define svldff1_gather_u32base_s32(...) __builtin_sve_svldff1_gather_u32base_s32(__VA_ARGS__)
+#define svldff1_gather_u64base_s64(...) __builtin_sve_svldff1_gather_u64base_s64(__VA_ARGS__)
+#define svldff1_gather_s32index_u32(...) __builtin_sve_svldff1_gather_s32index_u32(__VA_ARGS__)
+#define svldff1_gather_s32index_f32(...) __builtin_sve_svldff1_gather_s32index_f32(__VA_ARGS__)
+#define svldff1_gather_s32index_s32(...) __builtin_sve_svldff1_gather_s32index_s32(__VA_ARGS__)
+#define svldff1_gather_u32index_u32(...) __builtin_sve_svldff1_gather_u32index_u32(__VA_ARGS__)
+#define svldff1_gather_u32index_f32(...) __builtin_sve_svldff1_gather_u32index_f32(__VA_ARGS__)
+#define svldff1_gather_u32index_s32(...) __builtin_sve_svldff1_gather_u32index_s32(__VA_ARGS__)
+#define svldff1_gather_s64index_u64(...) __builtin_sve_svldff1_gather_s64index_u64(__VA_ARGS__)
+#define svldff1_gather_s64index_f64(...) __builtin_sve_svldff1_gather_s64index_f64(__VA_ARGS__)
+#define svldff1_gather_s64index_s64(...) __builtin_sve_svldff1_gather_s64index_s64(__VA_ARGS__)
+#define svldff1_gather_u64index_u64(...) __builtin_sve_svldff1_gather_u64index_u64(__VA_ARGS__)
+#define svldff1_gather_u64index_f64(...) __builtin_sve_svldff1_gather_u64index_f64(__VA_ARGS__)
+#define svldff1_gather_u64index_s64(...) __builtin_sve_svldff1_gather_u64index_s64(__VA_ARGS__)
+#define svldff1_gather_s32offset_u32(...) __builtin_sve_svldff1_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1_gather_s32offset_f32(...) __builtin_sve_svldff1_gather_s32offset_f32(__VA_ARGS__)
+#define svldff1_gather_s32offset_s32(...) __builtin_sve_svldff1_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1_gather_u32offset_u32(...) __builtin_sve_svldff1_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1_gather_u32offset_f32(...) __builtin_sve_svldff1_gather_u32offset_f32(__VA_ARGS__)
+#define svldff1_gather_u32offset_s32(...) __builtin_sve_svldff1_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1_gather_s64offset_u64(...) __builtin_sve_svldff1_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1_gather_s64offset_f64(...) __builtin_sve_svldff1_gather_s64offset_f64(__VA_ARGS__)
+#define svldff1_gather_s64offset_s64(...) __builtin_sve_svldff1_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1_gather_u64offset_u64(...) __builtin_sve_svldff1_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1_gather_u64offset_f64(...) __builtin_sve_svldff1_gather_u64offset_f64(__VA_ARGS__)
+#define svldff1_gather_u64offset_s64(...) __builtin_sve_svldff1_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1_vnum_u8(...) __builtin_sve_svldff1_vnum_u8(__VA_ARGS__)
+#define svldff1_vnum_u32(...) __builtin_sve_svldff1_vnum_u32(__VA_ARGS__)
+#define svldff1_vnum_u64(...) __builtin_sve_svldff1_vnum_u64(__VA_ARGS__)
+#define svldff1_vnum_u16(...) __builtin_sve_svldff1_vnum_u16(__VA_ARGS__)
+#define svldff1_vnum_s8(...) __builtin_sve_svldff1_vnum_s8(__VA_ARGS__)
+#define svldff1_vnum_f64(...) __builtin_sve_svldff1_vnum_f64(__VA_ARGS__)
+#define svldff1_vnum_f32(...) __builtin_sve_svldff1_vnum_f32(__VA_ARGS__)
+#define svldff1_vnum_f16(...) __builtin_sve_svldff1_vnum_f16(__VA_ARGS__)
+#define svldff1_vnum_s32(...) __builtin_sve_svldff1_vnum_s32(__VA_ARGS__)
+#define svldff1_vnum_s64(...) __builtin_sve_svldff1_vnum_s64(__VA_ARGS__)
+#define svldff1_vnum_s16(...) __builtin_sve_svldff1_vnum_s16(__VA_ARGS__)
+#define svldff1sb_gather_u32base_offset_u32(...) __builtin_sve_svldff1sb_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_offset_u64(...) __builtin_sve_svldff1sb_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1sb_gather_u32base_offset_s32(...) __builtin_sve_svldff1sb_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_offset_s64(...) __builtin_sve_svldff1sb_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1sb_gather_u32base_u32(...) __builtin_sve_svldff1sb_gather_u32base_u32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_u64(...) __builtin_sve_svldff1sb_gather_u64base_u64(__VA_ARGS__)
+#define svldff1sb_gather_u32base_s32(...) __builtin_sve_svldff1sb_gather_u32base_s32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_s64(...) __builtin_sve_svldff1sb_gather_u64base_s64(__VA_ARGS__)
+#define svldff1sb_gather_s32offset_u32(...) __builtin_sve_svldff1sb_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1sb_gather_s32offset_s32(...) __builtin_sve_svldff1sb_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1sb_gather_u32offset_u32(...) __builtin_sve_svldff1sb_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1sb_gather_u32offset_s32(...) __builtin_sve_svldff1sb_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1sb_gather_s64offset_u64(...) __builtin_sve_svldff1sb_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1sb_gather_s64offset_s64(...) __builtin_sve_svldff1sb_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1sb_gather_u64offset_u64(...) __builtin_sve_svldff1sb_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1sb_gather_u64offset_s64(...) __builtin_sve_svldff1sb_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1sb_vnum_u32(...) __builtin_sve_svldff1sb_vnum_u32(__VA_ARGS__)
+#define svldff1sb_vnum_u64(...) __builtin_sve_svldff1sb_vnum_u64(__VA_ARGS__)
+#define svldff1sb_vnum_u16(...) __builtin_sve_svldff1sb_vnum_u16(__VA_ARGS__)
+#define svldff1sb_vnum_s32(...) __builtin_sve_svldff1sb_vnum_s32(__VA_ARGS__)
+#define svldff1sb_vnum_s64(...) __builtin_sve_svldff1sb_vnum_s64(__VA_ARGS__)
+#define svldff1sb_vnum_s16(...) __builtin_sve_svldff1sb_vnum_s16(__VA_ARGS__)
+#define svldff1sb_u32(...) __builtin_sve_svldff1sb_u32(__VA_ARGS__)
+#define svldff1sb_u64(...) __builtin_sve_svldff1sb_u64(__VA_ARGS__)
+#define svldff1sb_u16(...) __builtin_sve_svldff1sb_u16(__VA_ARGS__)
+#define svldff1sb_s32(...) __builtin_sve_svldff1sb_s32(__VA_ARGS__)
+#define svldff1sb_s64(...) __builtin_sve_svldff1sb_s64(__VA_ARGS__)
+#define svldff1sb_s16(...) __builtin_sve_svldff1sb_s16(__VA_ARGS__)
+#define svldff1sh_gather_u32base_index_u32(...) __builtin_sve_svldff1sh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_index_u64(...) __builtin_sve_svldff1sh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_index_s32(...) __builtin_sve_svldff1sh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_index_s64(...) __builtin_sve_svldff1sh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_offset_u32(...) __builtin_sve_svldff1sh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_offset_u64(...) __builtin_sve_svldff1sh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_offset_s32(...) __builtin_sve_svldff1sh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_offset_s64(...) __builtin_sve_svldff1sh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_u32(...) __builtin_sve_svldff1sh_gather_u32base_u32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_u64(...) __builtin_sve_svldff1sh_gather_u64base_u64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_s32(...) __builtin_sve_svldff1sh_gather_u32base_s32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_s64(...) __builtin_sve_svldff1sh_gather_u64base_s64(__VA_ARGS__)
+#define svldff1sh_gather_s32index_u32(...) __builtin_sve_svldff1sh_gather_s32index_u32(__VA_ARGS__)
+#define svldff1sh_gather_s32index_s32(...) __builtin_sve_svldff1sh_gather_s32index_s32(__VA_ARGS__)
+#define svldff1sh_gather_u32index_u32(...) __builtin_sve_svldff1sh_gather_u32index_u32(__VA_ARGS__)
+#define svldff1sh_gather_u32index_s32(...) __builtin_sve_svldff1sh_gather_u32index_s32(__VA_ARGS__)
+#define svldff1sh_gather_s64index_u64(...) __builtin_sve_svldff1sh_gather_s64index_u64(__VA_ARGS__)
+#define svldff1sh_gather_s64index_s64(...) __builtin_sve_svldff1sh_gather_s64index_s64(__VA_ARGS__)
+#define svldff1sh_gather_u64index_u64(...) __builtin_sve_svldff1sh_gather_u64index_u64(__VA_ARGS__)
+#define svldff1sh_gather_u64index_s64(...) __builtin_sve_svldff1sh_gather_u64index_s64(__VA_ARGS__)
+#define svldff1sh_gather_s32offset_u32(...) __builtin_sve_svldff1sh_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1sh_gather_s32offset_s32(...) __builtin_sve_svldff1sh_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1sh_gather_u32offset_u32(...) __builtin_sve_svldff1sh_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1sh_gather_u32offset_s32(...) __builtin_sve_svldff1sh_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1sh_gather_s64offset_u64(...) __builtin_sve_svldff1sh_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1sh_gather_s64offset_s64(...) __builtin_sve_svldff1sh_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1sh_gather_u64offset_u64(...) __builtin_sve_svldff1sh_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1sh_gather_u64offset_s64(...) __builtin_sve_svldff1sh_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1sh_vnum_u32(...) __builtin_sve_svldff1sh_vnum_u32(__VA_ARGS__)
+#define svldff1sh_vnum_u64(...) __builtin_sve_svldff1sh_vnum_u64(__VA_ARGS__)
+#define svldff1sh_vnum_s32(...) __builtin_sve_svldff1sh_vnum_s32(__VA_ARGS__)
+#define svldff1sh_vnum_s64(...) __builtin_sve_svldff1sh_vnum_s64(__VA_ARGS__)
+#define svldff1sh_u32(...) __builtin_sve_svldff1sh_u32(__VA_ARGS__)
+#define svldff1sh_u64(...) __builtin_sve_svldff1sh_u64(__VA_ARGS__)
+#define svldff1sh_s32(...) __builtin_sve_svldff1sh_s32(__VA_ARGS__)
+#define svldff1sh_s64(...) __builtin_sve_svldff1sh_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_index_u64(...) __builtin_sve_svldff1sw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_index_s64(...) __builtin_sve_svldff1sw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_offset_u64(...) __builtin_sve_svldff1sw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_offset_s64(...) __builtin_sve_svldff1sw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_u64(...) __builtin_sve_svldff1sw_gather_u64base_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_s64(...) __builtin_sve_svldff1sw_gather_u64base_s64(__VA_ARGS__)
+#define svldff1sw_gather_s64index_u64(...) __builtin_sve_svldff1sw_gather_s64index_u64(__VA_ARGS__)
+#define svldff1sw_gather_s64index_s64(...) __builtin_sve_svldff1sw_gather_s64index_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64index_u64(...) __builtin_sve_svldff1sw_gather_u64index_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64index_s64(...) __builtin_sve_svldff1sw_gather_u64index_s64(__VA_ARGS__)
+#define svldff1sw_gather_s64offset_u64(...) __builtin_sve_svldff1sw_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1sw_gather_s64offset_s64(...) __builtin_sve_svldff1sw_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64offset_u64(...) __builtin_sve_svldff1sw_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64offset_s64(...) __builtin_sve_svldff1sw_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1sw_vnum_u64(...) __builtin_sve_svldff1sw_vnum_u64(__VA_ARGS__)
+#define svldff1sw_vnum_s64(...) __builtin_sve_svldff1sw_vnum_s64(__VA_ARGS__)
+#define svldff1sw_u64(...) __builtin_sve_svldff1sw_u64(__VA_ARGS__)
+#define svldff1sw_s64(...) __builtin_sve_svldff1sw_s64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_offset_u32(...) __builtin_sve_svldff1ub_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_offset_u64(...) __builtin_sve_svldff1ub_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_offset_s32(...) __builtin_sve_svldff1ub_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_offset_s64(...) __builtin_sve_svldff1ub_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_u32(...) __builtin_sve_svldff1ub_gather_u32base_u32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_u64(...) __builtin_sve_svldff1ub_gather_u64base_u64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_s32(...) __builtin_sve_svldff1ub_gather_u32base_s32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_s64(...) __builtin_sve_svldff1ub_gather_u64base_s64(__VA_ARGS__)
+#define svldff1ub_gather_s32offset_u32(...) __builtin_sve_svldff1ub_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1ub_gather_s32offset_s32(...) __builtin_sve_svldff1ub_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1ub_gather_u32offset_u32(...) __builtin_sve_svldff1ub_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1ub_gather_u32offset_s32(...) __builtin_sve_svldff1ub_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1ub_gather_s64offset_u64(...) __builtin_sve_svldff1ub_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1ub_gather_s64offset_s64(...) __builtin_sve_svldff1ub_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1ub_gather_u64offset_u64(...) __builtin_sve_svldff1ub_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1ub_gather_u64offset_s64(...) __builtin_sve_svldff1ub_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1ub_vnum_u32(...) __builtin_sve_svldff1ub_vnum_u32(__VA_ARGS__)
+#define svldff1ub_vnum_u64(...) __builtin_sve_svldff1ub_vnum_u64(__VA_ARGS__)
+#define svldff1ub_vnum_u16(...) __builtin_sve_svldff1ub_vnum_u16(__VA_ARGS__)
+#define svldff1ub_vnum_s32(...) __builtin_sve_svldff1ub_vnum_s32(__VA_ARGS__)
+#define svldff1ub_vnum_s64(...) __builtin_sve_svldff1ub_vnum_s64(__VA_ARGS__)
+#define svldff1ub_vnum_s16(...) __builtin_sve_svldff1ub_vnum_s16(__VA_ARGS__)
+#define svldff1ub_u32(...) __builtin_sve_svldff1ub_u32(__VA_ARGS__)
+#define svldff1ub_u64(...) __builtin_sve_svldff1ub_u64(__VA_ARGS__)
+#define svldff1ub_u16(...) __builtin_sve_svldff1ub_u16(__VA_ARGS__)
+#define svldff1ub_s32(...) __builtin_sve_svldff1ub_s32(__VA_ARGS__)
+#define svldff1ub_s64(...) __builtin_sve_svldff1ub_s64(__VA_ARGS__)
+#define svldff1ub_s16(...) __builtin_sve_svldff1ub_s16(__VA_ARGS__)
+#define svldff1uh_gather_u32base_index_u32(...) __builtin_sve_svldff1uh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_index_u64(...) __builtin_sve_svldff1uh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_index_s32(...) __builtin_sve_svldff1uh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_index_s64(...) __builtin_sve_svldff1uh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_offset_u32(...) __builtin_sve_svldff1uh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_offset_u64(...) __builtin_sve_svldff1uh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_offset_s32(...) __builtin_sve_svldff1uh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_offset_s64(...) __builtin_sve_svldff1uh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_u32(...) __builtin_sve_svldff1uh_gather_u32base_u32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_u64(...) __builtin_sve_svldff1uh_gather_u64base_u64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_s32(...) __builtin_sve_svldff1uh_gather_u32base_s32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_s64(...) __builtin_sve_svldff1uh_gather_u64base_s64(__VA_ARGS__)
+#define svldff1uh_gather_s32index_u32(...) __builtin_sve_svldff1uh_gather_s32index_u32(__VA_ARGS__)
+#define svldff1uh_gather_s32index_s32(...) __builtin_sve_svldff1uh_gather_s32index_s32(__VA_ARGS__)
+#define svldff1uh_gather_u32index_u32(...) __builtin_sve_svldff1uh_gather_u32index_u32(__VA_ARGS__)
+#define svldff1uh_gather_u32index_s32(...) __builtin_sve_svldff1uh_gather_u32index_s32(__VA_ARGS__)
+#define svldff1uh_gather_s64index_u64(...) __builtin_sve_svldff1uh_gather_s64index_u64(__VA_ARGS__)
+#define svldff1uh_gather_s64index_s64(...) __builtin_sve_svldff1uh_gather_s64index_s64(__VA_ARGS__)
+#define svldff1uh_gather_u64index_u64(...) __builtin_sve_svldff1uh_gather_u64index_u64(__VA_ARGS__)
+#define svldff1uh_gather_u64index_s64(...) __builtin_sve_svldff1uh_gather_u64index_s64(__VA_ARGS__)
+#define svldff1uh_gather_s32offset_u32(...) __builtin_sve_svldff1uh_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1uh_gather_s32offset_s32(...) __builtin_sve_svldff1uh_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1uh_gather_u32offset_u32(...) __builtin_sve_svldff1uh_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1uh_gather_u32offset_s32(...) __builtin_sve_svldff1uh_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1uh_gather_s64offset_u64(...) __builtin_sve_svldff1uh_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1uh_gather_s64offset_s64(...) __builtin_sve_svldff1uh_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1uh_gather_u64offset_u64(...) __builtin_sve_svldff1uh_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1uh_gather_u64offset_s64(...) __builtin_sve_svldff1uh_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1uh_vnum_u32(...) __builtin_sve_svldff1uh_vnum_u32(__VA_ARGS__)
+#define svldff1uh_vnum_u64(...) __builtin_sve_svldff1uh_vnum_u64(__VA_ARGS__)
+#define svldff1uh_vnum_s32(...) __builtin_sve_svldff1uh_vnum_s32(__VA_ARGS__)
+#define svldff1uh_vnum_s64(...) __builtin_sve_svldff1uh_vnum_s64(__VA_ARGS__)
+#define svldff1uh_u32(...) __builtin_sve_svldff1uh_u32(__VA_ARGS__)
+#define svldff1uh_u64(...) __builtin_sve_svldff1uh_u64(__VA_ARGS__)
+#define svldff1uh_s32(...) __builtin_sve_svldff1uh_s32(__VA_ARGS__)
+#define svldff1uh_s64(...) __builtin_sve_svldff1uh_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_index_u64(...) __builtin_sve_svldff1uw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_index_s64(...) __builtin_sve_svldff1uw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_offset_u64(...) __builtin_sve_svldff1uw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_offset_s64(...) __builtin_sve_svldff1uw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_u64(...) __builtin_sve_svldff1uw_gather_u64base_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_s64(...) __builtin_sve_svldff1uw_gather_u64base_s64(__VA_ARGS__)
+#define svldff1uw_gather_s64index_u64(...) __builtin_sve_svldff1uw_gather_s64index_u64(__VA_ARGS__)
+#define svldff1uw_gather_s64index_s64(...) __builtin_sve_svldff1uw_gather_s64index_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64index_u64(...) __builtin_sve_svldff1uw_gather_u64index_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64index_s64(...) __builtin_sve_svldff1uw_gather_u64index_s64(__VA_ARGS__)
+#define svldff1uw_gather_s64offset_u64(...) __builtin_sve_svldff1uw_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1uw_gather_s64offset_s64(...) __builtin_sve_svldff1uw_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64offset_u64(...) __builtin_sve_svldff1uw_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64offset_s64(...) __builtin_sve_svldff1uw_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1uw_vnum_u64(...) __builtin_sve_svldff1uw_vnum_u64(__VA_ARGS__)
+#define svldff1uw_vnum_s64(...) __builtin_sve_svldff1uw_vnum_s64(__VA_ARGS__)
+#define svldff1uw_u64(...) __builtin_sve_svldff1uw_u64(__VA_ARGS__)
+#define svldff1uw_s64(...) __builtin_sve_svldff1uw_s64(__VA_ARGS__)
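+// Note: the svldff1* forms are first-faulting loads: only the first active
+// element may fault, and lanes past a fault are recorded in the FFR (read
+// with svrdffr, reset with svsetffr) rather than trapping. Hedged sketch
+// (`ptr` is hypothetical):
+//
+//   svsetffr();
+//   svint32_t v = svldff1_s32(pg, ptr);
+//   svbool_t ok = svrdffr();  // lanes that were actually loaded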
+#define svldnf1_u8(...) __builtin_sve_svldnf1_u8(__VA_ARGS__)
+#define svldnf1_u32(...) __builtin_sve_svldnf1_u32(__VA_ARGS__)
+#define svldnf1_u64(...) __builtin_sve_svldnf1_u64(__VA_ARGS__)
+#define svldnf1_u16(...) __builtin_sve_svldnf1_u16(__VA_ARGS__)
+#define svldnf1_s8(...) __builtin_sve_svldnf1_s8(__VA_ARGS__)
+#define svldnf1_f64(...) __builtin_sve_svldnf1_f64(__VA_ARGS__)
+#define svldnf1_f32(...) __builtin_sve_svldnf1_f32(__VA_ARGS__)
+#define svldnf1_f16(...) __builtin_sve_svldnf1_f16(__VA_ARGS__)
+#define svldnf1_s32(...) __builtin_sve_svldnf1_s32(__VA_ARGS__)
+#define svldnf1_s64(...) __builtin_sve_svldnf1_s64(__VA_ARGS__)
+#define svldnf1_s16(...) __builtin_sve_svldnf1_s16(__VA_ARGS__)
+#define svldnf1_vnum_u8(...) __builtin_sve_svldnf1_vnum_u8(__VA_ARGS__)
+#define svldnf1_vnum_u32(...) __builtin_sve_svldnf1_vnum_u32(__VA_ARGS__)
+#define svldnf1_vnum_u64(...) __builtin_sve_svldnf1_vnum_u64(__VA_ARGS__)
+#define svldnf1_vnum_u16(...) __builtin_sve_svldnf1_vnum_u16(__VA_ARGS__)
+#define svldnf1_vnum_s8(...) __builtin_sve_svldnf1_vnum_s8(__VA_ARGS__)
+#define svldnf1_vnum_f64(...) __builtin_sve_svldnf1_vnum_f64(__VA_ARGS__)
+#define svldnf1_vnum_f32(...) __builtin_sve_svldnf1_vnum_f32(__VA_ARGS__)
+#define svldnf1_vnum_f16(...) __builtin_sve_svldnf1_vnum_f16(__VA_ARGS__)
+#define svldnf1_vnum_s32(...) __builtin_sve_svldnf1_vnum_s32(__VA_ARGS__)
+#define svldnf1_vnum_s64(...) __builtin_sve_svldnf1_vnum_s64(__VA_ARGS__)
+#define svldnf1_vnum_s16(...) __builtin_sve_svldnf1_vnum_s16(__VA_ARGS__)
+#define svldnf1sb_vnum_u32(...) __builtin_sve_svldnf1sb_vnum_u32(__VA_ARGS__)
+#define svldnf1sb_vnum_u64(...) __builtin_sve_svldnf1sb_vnum_u64(__VA_ARGS__)
+#define svldnf1sb_vnum_u16(...) __builtin_sve_svldnf1sb_vnum_u16(__VA_ARGS__)
+#define svldnf1sb_vnum_s32(...) __builtin_sve_svldnf1sb_vnum_s32(__VA_ARGS__)
+#define svldnf1sb_vnum_s64(...) __builtin_sve_svldnf1sb_vnum_s64(__VA_ARGS__)
+#define svldnf1sb_vnum_s16(...) __builtin_sve_svldnf1sb_vnum_s16(__VA_ARGS__)
+#define svldnf1sb_u32(...) __builtin_sve_svldnf1sb_u32(__VA_ARGS__)
+#define svldnf1sb_u64(...) __builtin_sve_svldnf1sb_u64(__VA_ARGS__)
+#define svldnf1sb_u16(...) __builtin_sve_svldnf1sb_u16(__VA_ARGS__)
+#define svldnf1sb_s32(...) __builtin_sve_svldnf1sb_s32(__VA_ARGS__)
+#define svldnf1sb_s64(...) __builtin_sve_svldnf1sb_s64(__VA_ARGS__)
+#define svldnf1sb_s16(...) __builtin_sve_svldnf1sb_s16(__VA_ARGS__)
+#define svldnf1sh_vnum_u32(...) __builtin_sve_svldnf1sh_vnum_u32(__VA_ARGS__)
+#define svldnf1sh_vnum_u64(...) __builtin_sve_svldnf1sh_vnum_u64(__VA_ARGS__)
+#define svldnf1sh_vnum_s32(...) __builtin_sve_svldnf1sh_vnum_s32(__VA_ARGS__)
+#define svldnf1sh_vnum_s64(...) __builtin_sve_svldnf1sh_vnum_s64(__VA_ARGS__)
+#define svldnf1sh_u32(...) __builtin_sve_svldnf1sh_u32(__VA_ARGS__)
+#define svldnf1sh_u64(...) __builtin_sve_svldnf1sh_u64(__VA_ARGS__)
+#define svldnf1sh_s32(...) __builtin_sve_svldnf1sh_s32(__VA_ARGS__)
+#define svldnf1sh_s64(...) __builtin_sve_svldnf1sh_s64(__VA_ARGS__)
+#define svldnf1sw_vnum_u64(...) __builtin_sve_svldnf1sw_vnum_u64(__VA_ARGS__)
+#define svldnf1sw_vnum_s64(...) __builtin_sve_svldnf1sw_vnum_s64(__VA_ARGS__)
+#define svldnf1sw_u64(...) __builtin_sve_svldnf1sw_u64(__VA_ARGS__)
+#define svldnf1sw_s64(...) __builtin_sve_svldnf1sw_s64(__VA_ARGS__)
+#define svldnf1ub_vnum_u32(...) __builtin_sve_svldnf1ub_vnum_u32(__VA_ARGS__)
+#define svldnf1ub_vnum_u64(...) __builtin_sve_svldnf1ub_vnum_u64(__VA_ARGS__)
+#define svldnf1ub_vnum_u16(...) __builtin_sve_svldnf1ub_vnum_u16(__VA_ARGS__)
+#define svldnf1ub_vnum_s32(...) __builtin_sve_svldnf1ub_vnum_s32(__VA_ARGS__)
+#define svldnf1ub_vnum_s64(...) __builtin_sve_svldnf1ub_vnum_s64(__VA_ARGS__)
+#define svldnf1ub_vnum_s16(...) __builtin_sve_svldnf1ub_vnum_s16(__VA_ARGS__)
+#define svldnf1ub_u32(...) __builtin_sve_svldnf1ub_u32(__VA_ARGS__)
+#define svldnf1ub_u64(...) __builtin_sve_svldnf1ub_u64(__VA_ARGS__)
+#define svldnf1ub_u16(...) __builtin_sve_svldnf1ub_u16(__VA_ARGS__)
+#define svldnf1ub_s32(...) __builtin_sve_svldnf1ub_s32(__VA_ARGS__)
+#define svldnf1ub_s64(...) __builtin_sve_svldnf1ub_s64(__VA_ARGS__)
+#define svldnf1ub_s16(...) __builtin_sve_svldnf1ub_s16(__VA_ARGS__)
+#define svldnf1uh_vnum_u32(...) __builtin_sve_svldnf1uh_vnum_u32(__VA_ARGS__)
+#define svldnf1uh_vnum_u64(...) __builtin_sve_svldnf1uh_vnum_u64(__VA_ARGS__)
+#define svldnf1uh_vnum_s32(...) __builtin_sve_svldnf1uh_vnum_s32(__VA_ARGS__)
+#define svldnf1uh_vnum_s64(...) __builtin_sve_svldnf1uh_vnum_s64(__VA_ARGS__)
+#define svldnf1uh_u32(...) __builtin_sve_svldnf1uh_u32(__VA_ARGS__)
+#define svldnf1uh_u64(...) __builtin_sve_svldnf1uh_u64(__VA_ARGS__)
+#define svldnf1uh_s32(...) __builtin_sve_svldnf1uh_s32(__VA_ARGS__)
+#define svldnf1uh_s64(...) __builtin_sve_svldnf1uh_s64(__VA_ARGS__)
+#define svldnf1uw_vnum_u64(...) __builtin_sve_svldnf1uw_vnum_u64(__VA_ARGS__)
+#define svldnf1uw_vnum_s64(...) __builtin_sve_svldnf1uw_vnum_s64(__VA_ARGS__)
+#define svldnf1uw_u64(...) __builtin_sve_svldnf1uw_u64(__VA_ARGS__)
+#define svldnf1uw_s64(...) __builtin_sve_svldnf1uw_s64(__VA_ARGS__)
+#define svldnt1_u8(...) __builtin_sve_svldnt1_u8(__VA_ARGS__)
+#define svldnt1_u32(...) __builtin_sve_svldnt1_u32(__VA_ARGS__)
+#define svldnt1_u64(...) __builtin_sve_svldnt1_u64(__VA_ARGS__)
+#define svldnt1_u16(...) __builtin_sve_svldnt1_u16(__VA_ARGS__)
+#define svldnt1_s8(...) __builtin_sve_svldnt1_s8(__VA_ARGS__)
+#define svldnt1_f64(...) __builtin_sve_svldnt1_f64(__VA_ARGS__)
+#define svldnt1_f32(...) __builtin_sve_svldnt1_f32(__VA_ARGS__)
+#define svldnt1_f16(...) __builtin_sve_svldnt1_f16(__VA_ARGS__)
+#define svldnt1_s32(...) __builtin_sve_svldnt1_s32(__VA_ARGS__)
+#define svldnt1_s64(...) __builtin_sve_svldnt1_s64(__VA_ARGS__)
+#define svldnt1_s16(...) __builtin_sve_svldnt1_s16(__VA_ARGS__)
+#define svldnt1_vnum_u8(...) __builtin_sve_svldnt1_vnum_u8(__VA_ARGS__)
+#define svldnt1_vnum_u32(...) __builtin_sve_svldnt1_vnum_u32(__VA_ARGS__)
+#define svldnt1_vnum_u64(...) __builtin_sve_svldnt1_vnum_u64(__VA_ARGS__)
+#define svldnt1_vnum_u16(...) __builtin_sve_svldnt1_vnum_u16(__VA_ARGS__)
+#define svldnt1_vnum_s8(...) __builtin_sve_svldnt1_vnum_s8(__VA_ARGS__)
+#define svldnt1_vnum_f64(...) __builtin_sve_svldnt1_vnum_f64(__VA_ARGS__)
+#define svldnt1_vnum_f32(...) __builtin_sve_svldnt1_vnum_f32(__VA_ARGS__)
+#define svldnt1_vnum_f16(...) __builtin_sve_svldnt1_vnum_f16(__VA_ARGS__)
+#define svldnt1_vnum_s32(...) __builtin_sve_svldnt1_vnum_s32(__VA_ARGS__)
+#define svldnt1_vnum_s64(...) __builtin_sve_svldnt1_vnum_s64(__VA_ARGS__)
+#define svldnt1_vnum_s16(...) __builtin_sve_svldnt1_vnum_s16(__VA_ARGS__)
+#define svlen_u8(...) __builtin_sve_svlen_u8(__VA_ARGS__)
+#define svlen_u32(...) __builtin_sve_svlen_u32(__VA_ARGS__)
+#define svlen_u64(...) __builtin_sve_svlen_u64(__VA_ARGS__)
+#define svlen_u16(...) __builtin_sve_svlen_u16(__VA_ARGS__)
+#define svlen_s8(...) __builtin_sve_svlen_s8(__VA_ARGS__)
+#define svlen_f64(...) __builtin_sve_svlen_f64(__VA_ARGS__)
+#define svlen_f32(...) __builtin_sve_svlen_f32(__VA_ARGS__)
+#define svlen_f16(...) __builtin_sve_svlen_f16(__VA_ARGS__)
+#define svlen_s32(...) __builtin_sve_svlen_s32(__VA_ARGS__)
+#define svlen_s64(...) __builtin_sve_svlen_s64(__VA_ARGS__)
+#define svlen_s16(...) __builtin_sve_svlen_s16(__VA_ARGS__)
+#define svlsl_n_u8_m(...) __builtin_sve_svlsl_n_u8_m(__VA_ARGS__)
+#define svlsl_n_u32_m(...) __builtin_sve_svlsl_n_u32_m(__VA_ARGS__)
+#define svlsl_n_u64_m(...) __builtin_sve_svlsl_n_u64_m(__VA_ARGS__)
+#define svlsl_n_u16_m(...) __builtin_sve_svlsl_n_u16_m(__VA_ARGS__)
+#define svlsl_n_s8_m(...) __builtin_sve_svlsl_n_s8_m(__VA_ARGS__)
+#define svlsl_n_s32_m(...) __builtin_sve_svlsl_n_s32_m(__VA_ARGS__)
+#define svlsl_n_s64_m(...) __builtin_sve_svlsl_n_s64_m(__VA_ARGS__)
+#define svlsl_n_s16_m(...) __builtin_sve_svlsl_n_s16_m(__VA_ARGS__)
+#define svlsl_n_u8_x(...) __builtin_sve_svlsl_n_u8_x(__VA_ARGS__)
+#define svlsl_n_u32_x(...) __builtin_sve_svlsl_n_u32_x(__VA_ARGS__)
+#define svlsl_n_u64_x(...) __builtin_sve_svlsl_n_u64_x(__VA_ARGS__)
+#define svlsl_n_u16_x(...) __builtin_sve_svlsl_n_u16_x(__VA_ARGS__)
+#define svlsl_n_s8_x(...) __builtin_sve_svlsl_n_s8_x(__VA_ARGS__)
+#define svlsl_n_s32_x(...) __builtin_sve_svlsl_n_s32_x(__VA_ARGS__)
+#define svlsl_n_s64_x(...) __builtin_sve_svlsl_n_s64_x(__VA_ARGS__)
+#define svlsl_n_s16_x(...) __builtin_sve_svlsl_n_s16_x(__VA_ARGS__)
+#define svlsl_n_u8_z(...) __builtin_sve_svlsl_n_u8_z(__VA_ARGS__)
+#define svlsl_n_u32_z(...) __builtin_sve_svlsl_n_u32_z(__VA_ARGS__)
+#define svlsl_n_u64_z(...) __builtin_sve_svlsl_n_u64_z(__VA_ARGS__)
+#define svlsl_n_u16_z(...) __builtin_sve_svlsl_n_u16_z(__VA_ARGS__)
+#define svlsl_n_s8_z(...) __builtin_sve_svlsl_n_s8_z(__VA_ARGS__)
+#define svlsl_n_s32_z(...) __builtin_sve_svlsl_n_s32_z(__VA_ARGS__)
+#define svlsl_n_s64_z(...) __builtin_sve_svlsl_n_s64_z(__VA_ARGS__)
+#define svlsl_n_s16_z(...) __builtin_sve_svlsl_n_s16_z(__VA_ARGS__)
+#define svlsl_u8_m(...) __builtin_sve_svlsl_u8_m(__VA_ARGS__)
+#define svlsl_u32_m(...) __builtin_sve_svlsl_u32_m(__VA_ARGS__)
+#define svlsl_u64_m(...) __builtin_sve_svlsl_u64_m(__VA_ARGS__)
+#define svlsl_u16_m(...) __builtin_sve_svlsl_u16_m(__VA_ARGS__)
+#define svlsl_s8_m(...) __builtin_sve_svlsl_s8_m(__VA_ARGS__)
+#define svlsl_s32_m(...) __builtin_sve_svlsl_s32_m(__VA_ARGS__)
+#define svlsl_s64_m(...) __builtin_sve_svlsl_s64_m(__VA_ARGS__)
+#define svlsl_s16_m(...) __builtin_sve_svlsl_s16_m(__VA_ARGS__)
+#define svlsl_u8_x(...) __builtin_sve_svlsl_u8_x(__VA_ARGS__)
+#define svlsl_u32_x(...) __builtin_sve_svlsl_u32_x(__VA_ARGS__)
+#define svlsl_u64_x(...) __builtin_sve_svlsl_u64_x(__VA_ARGS__)
+#define svlsl_u16_x(...) __builtin_sve_svlsl_u16_x(__VA_ARGS__)
+#define svlsl_s8_x(...) __builtin_sve_svlsl_s8_x(__VA_ARGS__)
+#define svlsl_s32_x(...) __builtin_sve_svlsl_s32_x(__VA_ARGS__)
+#define svlsl_s64_x(...) __builtin_sve_svlsl_s64_x(__VA_ARGS__)
+#define svlsl_s16_x(...) __builtin_sve_svlsl_s16_x(__VA_ARGS__)
+#define svlsl_u8_z(...) __builtin_sve_svlsl_u8_z(__VA_ARGS__)
+#define svlsl_u32_z(...) __builtin_sve_svlsl_u32_z(__VA_ARGS__)
+#define svlsl_u64_z(...) __builtin_sve_svlsl_u64_z(__VA_ARGS__)
+#define svlsl_u16_z(...) __builtin_sve_svlsl_u16_z(__VA_ARGS__)
+#define svlsl_s8_z(...) __builtin_sve_svlsl_s8_z(__VA_ARGS__)
+#define svlsl_s32_z(...) __builtin_sve_svlsl_s32_z(__VA_ARGS__)
+#define svlsl_s64_z(...) __builtin_sve_svlsl_s64_z(__VA_ARGS__)
+#define svlsl_s16_z(...) __builtin_sve_svlsl_s16_z(__VA_ARGS__)
+#define svlsl_wide_n_u8_m(...) __builtin_sve_svlsl_wide_n_u8_m(__VA_ARGS__)
+#define svlsl_wide_n_u32_m(...) __builtin_sve_svlsl_wide_n_u32_m(__VA_ARGS__)
+#define svlsl_wide_n_u16_m(...) __builtin_sve_svlsl_wide_n_u16_m(__VA_ARGS__)
+#define svlsl_wide_n_s8_m(...) __builtin_sve_svlsl_wide_n_s8_m(__VA_ARGS__)
+#define svlsl_wide_n_s32_m(...) __builtin_sve_svlsl_wide_n_s32_m(__VA_ARGS__)
+#define svlsl_wide_n_s16_m(...) __builtin_sve_svlsl_wide_n_s16_m(__VA_ARGS__)
+#define svlsl_wide_n_u8_x(...) __builtin_sve_svlsl_wide_n_u8_x(__VA_ARGS__)
+#define svlsl_wide_n_u32_x(...) __builtin_sve_svlsl_wide_n_u32_x(__VA_ARGS__)
+#define svlsl_wide_n_u16_x(...) __builtin_sve_svlsl_wide_n_u16_x(__VA_ARGS__)
+#define svlsl_wide_n_s8_x(...) __builtin_sve_svlsl_wide_n_s8_x(__VA_ARGS__)
+#define svlsl_wide_n_s32_x(...) __builtin_sve_svlsl_wide_n_s32_x(__VA_ARGS__)
+#define svlsl_wide_n_s16_x(...) __builtin_sve_svlsl_wide_n_s16_x(__VA_ARGS__)
+#define svlsl_wide_n_u8_z(...) __builtin_sve_svlsl_wide_n_u8_z(__VA_ARGS__)
+#define svlsl_wide_n_u32_z(...) __builtin_sve_svlsl_wide_n_u32_z(__VA_ARGS__)
+#define svlsl_wide_n_u16_z(...) __builtin_sve_svlsl_wide_n_u16_z(__VA_ARGS__)
+#define svlsl_wide_n_s8_z(...) __builtin_sve_svlsl_wide_n_s8_z(__VA_ARGS__)
+#define svlsl_wide_n_s32_z(...) __builtin_sve_svlsl_wide_n_s32_z(__VA_ARGS__)
+#define svlsl_wide_n_s16_z(...) __builtin_sve_svlsl_wide_n_s16_z(__VA_ARGS__)
+#define svlsl_wide_u8_m(...) __builtin_sve_svlsl_wide_u8_m(__VA_ARGS__)
+#define svlsl_wide_u32_m(...) __builtin_sve_svlsl_wide_u32_m(__VA_ARGS__)
+#define svlsl_wide_u16_m(...) __builtin_sve_svlsl_wide_u16_m(__VA_ARGS__)
+#define svlsl_wide_s8_m(...) __builtin_sve_svlsl_wide_s8_m(__VA_ARGS__)
+#define svlsl_wide_s32_m(...) __builtin_sve_svlsl_wide_s32_m(__VA_ARGS__)
+#define svlsl_wide_s16_m(...) __builtin_sve_svlsl_wide_s16_m(__VA_ARGS__)
+#define svlsl_wide_u8_x(...) __builtin_sve_svlsl_wide_u8_x(__VA_ARGS__)
+#define svlsl_wide_u32_x(...) __builtin_sve_svlsl_wide_u32_x(__VA_ARGS__)
+#define svlsl_wide_u16_x(...) __builtin_sve_svlsl_wide_u16_x(__VA_ARGS__)
+#define svlsl_wide_s8_x(...) __builtin_sve_svlsl_wide_s8_x(__VA_ARGS__)
+#define svlsl_wide_s32_x(...) __builtin_sve_svlsl_wide_s32_x(__VA_ARGS__)
+#define svlsl_wide_s16_x(...) __builtin_sve_svlsl_wide_s16_x(__VA_ARGS__)
+#define svlsl_wide_u8_z(...) __builtin_sve_svlsl_wide_u8_z(__VA_ARGS__)
+#define svlsl_wide_u32_z(...) __builtin_sve_svlsl_wide_u32_z(__VA_ARGS__)
+#define svlsl_wide_u16_z(...) __builtin_sve_svlsl_wide_u16_z(__VA_ARGS__)
+#define svlsl_wide_s8_z(...) __builtin_sve_svlsl_wide_s8_z(__VA_ARGS__)
+#define svlsl_wide_s32_z(...) __builtin_sve_svlsl_wide_s32_z(__VA_ARGS__)
+#define svlsl_wide_s16_z(...) __builtin_sve_svlsl_wide_s16_z(__VA_ARGS__)
+#define svlsr_n_u8_m(...) __builtin_sve_svlsr_n_u8_m(__VA_ARGS__)
+#define svlsr_n_u32_m(...) __builtin_sve_svlsr_n_u32_m(__VA_ARGS__)
+#define svlsr_n_u64_m(...) __builtin_sve_svlsr_n_u64_m(__VA_ARGS__)
+#define svlsr_n_u16_m(...) __builtin_sve_svlsr_n_u16_m(__VA_ARGS__)
+#define svlsr_n_u8_x(...) __builtin_sve_svlsr_n_u8_x(__VA_ARGS__)
+#define svlsr_n_u32_x(...) __builtin_sve_svlsr_n_u32_x(__VA_ARGS__)
+#define svlsr_n_u64_x(...) __builtin_sve_svlsr_n_u64_x(__VA_ARGS__)
+#define svlsr_n_u16_x(...) __builtin_sve_svlsr_n_u16_x(__VA_ARGS__)
+#define svlsr_n_u8_z(...) __builtin_sve_svlsr_n_u8_z(__VA_ARGS__)
+#define svlsr_n_u32_z(...) __builtin_sve_svlsr_n_u32_z(__VA_ARGS__)
+#define svlsr_n_u64_z(...) __builtin_sve_svlsr_n_u64_z(__VA_ARGS__)
+#define svlsr_n_u16_z(...) __builtin_sve_svlsr_n_u16_z(__VA_ARGS__)
+#define svlsr_u8_m(...) __builtin_sve_svlsr_u8_m(__VA_ARGS__)
+#define svlsr_u32_m(...) __builtin_sve_svlsr_u32_m(__VA_ARGS__)
+#define svlsr_u64_m(...) __builtin_sve_svlsr_u64_m(__VA_ARGS__)
+#define svlsr_u16_m(...) __builtin_sve_svlsr_u16_m(__VA_ARGS__)
+#define svlsr_u8_x(...) __builtin_sve_svlsr_u8_x(__VA_ARGS__)
+#define svlsr_u32_x(...) __builtin_sve_svlsr_u32_x(__VA_ARGS__)
+#define svlsr_u64_x(...) __builtin_sve_svlsr_u64_x(__VA_ARGS__)
+#define svlsr_u16_x(...) __builtin_sve_svlsr_u16_x(__VA_ARGS__)
+#define svlsr_u8_z(...) __builtin_sve_svlsr_u8_z(__VA_ARGS__)
+#define svlsr_u32_z(...) __builtin_sve_svlsr_u32_z(__VA_ARGS__)
+#define svlsr_u64_z(...) __builtin_sve_svlsr_u64_z(__VA_ARGS__)
+#define svlsr_u16_z(...) __builtin_sve_svlsr_u16_z(__VA_ARGS__)
+#define svlsr_wide_n_u8_m(...) __builtin_sve_svlsr_wide_n_u8_m(__VA_ARGS__)
+#define svlsr_wide_n_u32_m(...) __builtin_sve_svlsr_wide_n_u32_m(__VA_ARGS__)
+#define svlsr_wide_n_u16_m(...) __builtin_sve_svlsr_wide_n_u16_m(__VA_ARGS__)
+#define svlsr_wide_n_u8_x(...) __builtin_sve_svlsr_wide_n_u8_x(__VA_ARGS__)
+#define svlsr_wide_n_u32_x(...) __builtin_sve_svlsr_wide_n_u32_x(__VA_ARGS__)
+#define svlsr_wide_n_u16_x(...) __builtin_sve_svlsr_wide_n_u16_x(__VA_ARGS__)
+#define svlsr_wide_n_u8_z(...) __builtin_sve_svlsr_wide_n_u8_z(__VA_ARGS__)
+#define svlsr_wide_n_u32_z(...) __builtin_sve_svlsr_wide_n_u32_z(__VA_ARGS__)
+#define svlsr_wide_n_u16_z(...) __builtin_sve_svlsr_wide_n_u16_z(__VA_ARGS__)
+#define svlsr_wide_u8_m(...) __builtin_sve_svlsr_wide_u8_m(__VA_ARGS__)
+#define svlsr_wide_u32_m(...) __builtin_sve_svlsr_wide_u32_m(__VA_ARGS__)
+#define svlsr_wide_u16_m(...) __builtin_sve_svlsr_wide_u16_m(__VA_ARGS__)
+#define svlsr_wide_u8_x(...) __builtin_sve_svlsr_wide_u8_x(__VA_ARGS__)
+#define svlsr_wide_u32_x(...) __builtin_sve_svlsr_wide_u32_x(__VA_ARGS__)
+#define svlsr_wide_u16_x(...) __builtin_sve_svlsr_wide_u16_x(__VA_ARGS__)
+#define svlsr_wide_u8_z(...) __builtin_sve_svlsr_wide_u8_z(__VA_ARGS__)
+#define svlsr_wide_u32_z(...) __builtin_sve_svlsr_wide_u32_z(__VA_ARGS__)
+#define svlsr_wide_u16_z(...) __builtin_sve_svlsr_wide_u16_z(__VA_ARGS__)
+#define svmad_n_f64_m(...) __builtin_sve_svmad_n_f64_m(__VA_ARGS__)
+#define svmad_n_f32_m(...) __builtin_sve_svmad_n_f32_m(__VA_ARGS__)
+#define svmad_n_f16_m(...) __builtin_sve_svmad_n_f16_m(__VA_ARGS__)
+#define svmad_n_f64_x(...) __builtin_sve_svmad_n_f64_x(__VA_ARGS__)
+#define svmad_n_f32_x(...) __builtin_sve_svmad_n_f32_x(__VA_ARGS__)
+#define svmad_n_f16_x(...) __builtin_sve_svmad_n_f16_x(__VA_ARGS__)
+#define svmad_n_f64_z(...) __builtin_sve_svmad_n_f64_z(__VA_ARGS__)
+#define svmad_n_f32_z(...) __builtin_sve_svmad_n_f32_z(__VA_ARGS__)
+#define svmad_n_f16_z(...) __builtin_sve_svmad_n_f16_z(__VA_ARGS__)
+#define svmad_n_u8_m(...) __builtin_sve_svmad_n_u8_m(__VA_ARGS__)
+#define svmad_n_u32_m(...) __builtin_sve_svmad_n_u32_m(__VA_ARGS__)
+#define svmad_n_u64_m(...) __builtin_sve_svmad_n_u64_m(__VA_ARGS__)
+#define svmad_n_u16_m(...) __builtin_sve_svmad_n_u16_m(__VA_ARGS__)
+#define svmad_n_s8_m(...) __builtin_sve_svmad_n_s8_m(__VA_ARGS__)
+#define svmad_n_s32_m(...) __builtin_sve_svmad_n_s32_m(__VA_ARGS__)
+#define svmad_n_s64_m(...) __builtin_sve_svmad_n_s64_m(__VA_ARGS__)
+#define svmad_n_s16_m(...) __builtin_sve_svmad_n_s16_m(__VA_ARGS__)
+#define svmad_n_u8_x(...) __builtin_sve_svmad_n_u8_x(__VA_ARGS__)
+#define svmad_n_u32_x(...) __builtin_sve_svmad_n_u32_x(__VA_ARGS__)
+#define svmad_n_u64_x(...) __builtin_sve_svmad_n_u64_x(__VA_ARGS__)
+#define svmad_n_u16_x(...) __builtin_sve_svmad_n_u16_x(__VA_ARGS__)
+#define svmad_n_s8_x(...) __builtin_sve_svmad_n_s8_x(__VA_ARGS__)
+#define svmad_n_s32_x(...) __builtin_sve_svmad_n_s32_x(__VA_ARGS__)
+#define svmad_n_s64_x(...) __builtin_sve_svmad_n_s64_x(__VA_ARGS__)
+#define svmad_n_s16_x(...) __builtin_sve_svmad_n_s16_x(__VA_ARGS__)
+#define svmad_n_u8_z(...) __builtin_sve_svmad_n_u8_z(__VA_ARGS__)
+#define svmad_n_u32_z(...) __builtin_sve_svmad_n_u32_z(__VA_ARGS__)
+#define svmad_n_u64_z(...) __builtin_sve_svmad_n_u64_z(__VA_ARGS__)
+#define svmad_n_u16_z(...) __builtin_sve_svmad_n_u16_z(__VA_ARGS__)
+#define svmad_n_s8_z(...) __builtin_sve_svmad_n_s8_z(__VA_ARGS__)
+#define svmad_n_s32_z(...) __builtin_sve_svmad_n_s32_z(__VA_ARGS__)
+#define svmad_n_s64_z(...) __builtin_sve_svmad_n_s64_z(__VA_ARGS__)
+#define svmad_n_s16_z(...) __builtin_sve_svmad_n_s16_z(__VA_ARGS__)
+#define svmad_f64_m(...) __builtin_sve_svmad_f64_m(__VA_ARGS__)
+#define svmad_f32_m(...) __builtin_sve_svmad_f32_m(__VA_ARGS__)
+#define svmad_f16_m(...) __builtin_sve_svmad_f16_m(__VA_ARGS__)
+#define svmad_f64_x(...) __builtin_sve_svmad_f64_x(__VA_ARGS__)
+#define svmad_f32_x(...) __builtin_sve_svmad_f32_x(__VA_ARGS__)
+#define svmad_f16_x(...) __builtin_sve_svmad_f16_x(__VA_ARGS__)
+#define svmad_f64_z(...) __builtin_sve_svmad_f64_z(__VA_ARGS__)
+#define svmad_f32_z(...) __builtin_sve_svmad_f32_z(__VA_ARGS__)
+#define svmad_f16_z(...) __builtin_sve_svmad_f16_z(__VA_ARGS__)
+#define svmad_u8_m(...) __builtin_sve_svmad_u8_m(__VA_ARGS__)
+#define svmad_u32_m(...) __builtin_sve_svmad_u32_m(__VA_ARGS__)
+#define svmad_u64_m(...) __builtin_sve_svmad_u64_m(__VA_ARGS__)
+#define svmad_u16_m(...) __builtin_sve_svmad_u16_m(__VA_ARGS__)
+#define svmad_s8_m(...) __builtin_sve_svmad_s8_m(__VA_ARGS__)
+#define svmad_s32_m(...) __builtin_sve_svmad_s32_m(__VA_ARGS__)
+#define svmad_s64_m(...) __builtin_sve_svmad_s64_m(__VA_ARGS__)
+#define svmad_s16_m(...) __builtin_sve_svmad_s16_m(__VA_ARGS__)
+#define svmad_u8_x(...) __builtin_sve_svmad_u8_x(__VA_ARGS__)
+#define svmad_u32_x(...) __builtin_sve_svmad_u32_x(__VA_ARGS__)
+#define svmad_u64_x(...) __builtin_sve_svmad_u64_x(__VA_ARGS__)
+#define svmad_u16_x(...) __builtin_sve_svmad_u16_x(__VA_ARGS__)
+#define svmad_s8_x(...) __builtin_sve_svmad_s8_x(__VA_ARGS__)
+#define svmad_s32_x(...) __builtin_sve_svmad_s32_x(__VA_ARGS__)
+#define svmad_s64_x(...) __builtin_sve_svmad_s64_x(__VA_ARGS__)
+#define svmad_s16_x(...) __builtin_sve_svmad_s16_x(__VA_ARGS__)
+#define svmad_u8_z(...) __builtin_sve_svmad_u8_z(__VA_ARGS__)
+#define svmad_u32_z(...) __builtin_sve_svmad_u32_z(__VA_ARGS__)
+#define svmad_u64_z(...) __builtin_sve_svmad_u64_z(__VA_ARGS__)
+#define svmad_u16_z(...) __builtin_sve_svmad_u16_z(__VA_ARGS__)
+#define svmad_s8_z(...) __builtin_sve_svmad_s8_z(__VA_ARGS__)
+#define svmad_s32_z(...) __builtin_sve_svmad_s32_z(__VA_ARGS__)
+#define svmad_s64_z(...) __builtin_sve_svmad_s64_z(__VA_ARGS__)
+#define svmad_s16_z(...) __builtin_sve_svmad_s16_z(__VA_ARGS__)
+#define svmax_n_f64_m(...) __builtin_sve_svmax_n_f64_m(__VA_ARGS__)
+#define svmax_n_f32_m(...) __builtin_sve_svmax_n_f32_m(__VA_ARGS__)
+#define svmax_n_f16_m(...) __builtin_sve_svmax_n_f16_m(__VA_ARGS__)
+#define svmax_n_f64_x(...) __builtin_sve_svmax_n_f64_x(__VA_ARGS__)
+#define svmax_n_f32_x(...) __builtin_sve_svmax_n_f32_x(__VA_ARGS__)
+#define svmax_n_f16_x(...) __builtin_sve_svmax_n_f16_x(__VA_ARGS__)
+#define svmax_n_f64_z(...) __builtin_sve_svmax_n_f64_z(__VA_ARGS__)
+#define svmax_n_f32_z(...) __builtin_sve_svmax_n_f32_z(__VA_ARGS__)
+#define svmax_n_f16_z(...) __builtin_sve_svmax_n_f16_z(__VA_ARGS__)
+#define svmax_n_s8_m(...) __builtin_sve_svmax_n_s8_m(__VA_ARGS__)
+#define svmax_n_s32_m(...) __builtin_sve_svmax_n_s32_m(__VA_ARGS__)
+#define svmax_n_s64_m(...) __builtin_sve_svmax_n_s64_m(__VA_ARGS__)
+#define svmax_n_s16_m(...) __builtin_sve_svmax_n_s16_m(__VA_ARGS__)
+#define svmax_n_s8_x(...) __builtin_sve_svmax_n_s8_x(__VA_ARGS__)
+#define svmax_n_s32_x(...) __builtin_sve_svmax_n_s32_x(__VA_ARGS__)
+#define svmax_n_s64_x(...) __builtin_sve_svmax_n_s64_x(__VA_ARGS__)
+#define svmax_n_s16_x(...) __builtin_sve_svmax_n_s16_x(__VA_ARGS__)
+#define svmax_n_s8_z(...) __builtin_sve_svmax_n_s8_z(__VA_ARGS__)
+#define svmax_n_s32_z(...) __builtin_sve_svmax_n_s32_z(__VA_ARGS__)
+#define svmax_n_s64_z(...) __builtin_sve_svmax_n_s64_z(__VA_ARGS__)
+#define svmax_n_s16_z(...) __builtin_sve_svmax_n_s16_z(__VA_ARGS__)
+#define svmax_n_u8_m(...) __builtin_sve_svmax_n_u8_m(__VA_ARGS__)
+#define svmax_n_u32_m(...) __builtin_sve_svmax_n_u32_m(__VA_ARGS__)
+#define svmax_n_u64_m(...) __builtin_sve_svmax_n_u64_m(__VA_ARGS__)
+#define svmax_n_u16_m(...) __builtin_sve_svmax_n_u16_m(__VA_ARGS__)
+#define svmax_n_u8_x(...) __builtin_sve_svmax_n_u8_x(__VA_ARGS__)
+#define svmax_n_u32_x(...) __builtin_sve_svmax_n_u32_x(__VA_ARGS__)
+#define svmax_n_u64_x(...) __builtin_sve_svmax_n_u64_x(__VA_ARGS__)
+#define svmax_n_u16_x(...) __builtin_sve_svmax_n_u16_x(__VA_ARGS__)
+#define svmax_n_u8_z(...) __builtin_sve_svmax_n_u8_z(__VA_ARGS__)
+#define svmax_n_u32_z(...) __builtin_sve_svmax_n_u32_z(__VA_ARGS__)
+#define svmax_n_u64_z(...) __builtin_sve_svmax_n_u64_z(__VA_ARGS__)
+#define svmax_n_u16_z(...) __builtin_sve_svmax_n_u16_z(__VA_ARGS__)
+#define svmax_f64_m(...) __builtin_sve_svmax_f64_m(__VA_ARGS__)
+#define svmax_f32_m(...) __builtin_sve_svmax_f32_m(__VA_ARGS__)
+#define svmax_f16_m(...) __builtin_sve_svmax_f16_m(__VA_ARGS__)
+#define svmax_f64_x(...) __builtin_sve_svmax_f64_x(__VA_ARGS__)
+#define svmax_f32_x(...) __builtin_sve_svmax_f32_x(__VA_ARGS__)
+#define svmax_f16_x(...) __builtin_sve_svmax_f16_x(__VA_ARGS__)
+#define svmax_f64_z(...) __builtin_sve_svmax_f64_z(__VA_ARGS__)
+#define svmax_f32_z(...) __builtin_sve_svmax_f32_z(__VA_ARGS__)
+#define svmax_f16_z(...) __builtin_sve_svmax_f16_z(__VA_ARGS__)
+#define svmax_s8_m(...) __builtin_sve_svmax_s8_m(__VA_ARGS__)
+#define svmax_s32_m(...) __builtin_sve_svmax_s32_m(__VA_ARGS__)
+#define svmax_s64_m(...) __builtin_sve_svmax_s64_m(__VA_ARGS__)
+#define svmax_s16_m(...) __builtin_sve_svmax_s16_m(__VA_ARGS__)
+#define svmax_s8_x(...) __builtin_sve_svmax_s8_x(__VA_ARGS__)
+#define svmax_s32_x(...) __builtin_sve_svmax_s32_x(__VA_ARGS__)
+#define svmax_s64_x(...) __builtin_sve_svmax_s64_x(__VA_ARGS__)
+#define svmax_s16_x(...) __builtin_sve_svmax_s16_x(__VA_ARGS__)
+#define svmax_s8_z(...) __builtin_sve_svmax_s8_z(__VA_ARGS__)
+#define svmax_s32_z(...) __builtin_sve_svmax_s32_z(__VA_ARGS__)
+#define svmax_s64_z(...) __builtin_sve_svmax_s64_z(__VA_ARGS__)
+#define svmax_s16_z(...) __builtin_sve_svmax_s16_z(__VA_ARGS__)
+#define svmax_u8_m(...) __builtin_sve_svmax_u8_m(__VA_ARGS__)
+#define svmax_u32_m(...) __builtin_sve_svmax_u32_m(__VA_ARGS__)
+#define svmax_u64_m(...) __builtin_sve_svmax_u64_m(__VA_ARGS__)
+#define svmax_u16_m(...) __builtin_sve_svmax_u16_m(__VA_ARGS__)
+#define svmax_u8_x(...) __builtin_sve_svmax_u8_x(__VA_ARGS__)
+#define svmax_u32_x(...) __builtin_sve_svmax_u32_x(__VA_ARGS__)
+#define svmax_u64_x(...) __builtin_sve_svmax_u64_x(__VA_ARGS__)
+#define svmax_u16_x(...) __builtin_sve_svmax_u16_x(__VA_ARGS__)
+#define svmax_u8_z(...) __builtin_sve_svmax_u8_z(__VA_ARGS__)
+#define svmax_u32_z(...) __builtin_sve_svmax_u32_z(__VA_ARGS__)
+#define svmax_u64_z(...) __builtin_sve_svmax_u64_z(__VA_ARGS__)
+#define svmax_u16_z(...) __builtin_sve_svmax_u16_z(__VA_ARGS__)
+#define svmaxnm_n_f64_m(...) __builtin_sve_svmaxnm_n_f64_m(__VA_ARGS__)
+#define svmaxnm_n_f32_m(...) __builtin_sve_svmaxnm_n_f32_m(__VA_ARGS__)
+#define svmaxnm_n_f16_m(...) __builtin_sve_svmaxnm_n_f16_m(__VA_ARGS__)
+#define svmaxnm_n_f64_x(...) __builtin_sve_svmaxnm_n_f64_x(__VA_ARGS__)
+#define svmaxnm_n_f32_x(...) __builtin_sve_svmaxnm_n_f32_x(__VA_ARGS__)
+#define svmaxnm_n_f16_x(...) __builtin_sve_svmaxnm_n_f16_x(__VA_ARGS__)
+#define svmaxnm_n_f64_z(...) __builtin_sve_svmaxnm_n_f64_z(__VA_ARGS__)
+#define svmaxnm_n_f32_z(...) __builtin_sve_svmaxnm_n_f32_z(__VA_ARGS__)
+#define svmaxnm_n_f16_z(...) __builtin_sve_svmaxnm_n_f16_z(__VA_ARGS__)
+#define svmaxnm_f64_m(...) __builtin_sve_svmaxnm_f64_m(__VA_ARGS__)
+#define svmaxnm_f32_m(...) __builtin_sve_svmaxnm_f32_m(__VA_ARGS__)
+#define svmaxnm_f16_m(...) __builtin_sve_svmaxnm_f16_m(__VA_ARGS__)
+#define svmaxnm_f64_x(...) __builtin_sve_svmaxnm_f64_x(__VA_ARGS__)
+#define svmaxnm_f32_x(...) __builtin_sve_svmaxnm_f32_x(__VA_ARGS__)
+#define svmaxnm_f16_x(...) __builtin_sve_svmaxnm_f16_x(__VA_ARGS__)
+#define svmaxnm_f64_z(...) __builtin_sve_svmaxnm_f64_z(__VA_ARGS__)
+#define svmaxnm_f32_z(...) __builtin_sve_svmaxnm_f32_z(__VA_ARGS__)
+#define svmaxnm_f16_z(...) __builtin_sve_svmaxnm_f16_z(__VA_ARGS__)
+#define svmaxnmv_f64(...) __builtin_sve_svmaxnmv_f64(__VA_ARGS__)
+#define svmaxnmv_f32(...) __builtin_sve_svmaxnmv_f32(__VA_ARGS__)
+#define svmaxnmv_f16(...) __builtin_sve_svmaxnmv_f16(__VA_ARGS__)
+#define svmaxv_f64(...) __builtin_sve_svmaxv_f64(__VA_ARGS__)
+#define svmaxv_f32(...) __builtin_sve_svmaxv_f32(__VA_ARGS__)
+#define svmaxv_f16(...) __builtin_sve_svmaxv_f16(__VA_ARGS__)
+#define svmaxv_s8(...) __builtin_sve_svmaxv_s8(__VA_ARGS__)
+#define svmaxv_s32(...) __builtin_sve_svmaxv_s32(__VA_ARGS__)
+#define svmaxv_s64(...) __builtin_sve_svmaxv_s64(__VA_ARGS__)
+#define svmaxv_s16(...) __builtin_sve_svmaxv_s16(__VA_ARGS__)
+#define svmaxv_u8(...) __builtin_sve_svmaxv_u8(__VA_ARGS__)
+#define svmaxv_u32(...) __builtin_sve_svmaxv_u32(__VA_ARGS__)
+#define svmaxv_u64(...) __builtin_sve_svmaxv_u64(__VA_ARGS__)
+#define svmaxv_u16(...) __builtin_sve_svmaxv_u16(__VA_ARGS__)
+#define svmin_n_f64_m(...) __builtin_sve_svmin_n_f64_m(__VA_ARGS__)
+#define svmin_n_f32_m(...) __builtin_sve_svmin_n_f32_m(__VA_ARGS__)
+#define svmin_n_f16_m(...) __builtin_sve_svmin_n_f16_m(__VA_ARGS__)
+#define svmin_n_f64_x(...) __builtin_sve_svmin_n_f64_x(__VA_ARGS__)
+#define svmin_n_f32_x(...) __builtin_sve_svmin_n_f32_x(__VA_ARGS__)
+#define svmin_n_f16_x(...) __builtin_sve_svmin_n_f16_x(__VA_ARGS__)
+#define svmin_n_f64_z(...) __builtin_sve_svmin_n_f64_z(__VA_ARGS__)
+#define svmin_n_f32_z(...) __builtin_sve_svmin_n_f32_z(__VA_ARGS__)
+#define svmin_n_f16_z(...) __builtin_sve_svmin_n_f16_z(__VA_ARGS__)
+#define svmin_n_s8_m(...) __builtin_sve_svmin_n_s8_m(__VA_ARGS__)
+#define svmin_n_s32_m(...) __builtin_sve_svmin_n_s32_m(__VA_ARGS__)
+#define svmin_n_s64_m(...) __builtin_sve_svmin_n_s64_m(__VA_ARGS__)
+#define svmin_n_s16_m(...) __builtin_sve_svmin_n_s16_m(__VA_ARGS__)
+#define svmin_n_s8_x(...) __builtin_sve_svmin_n_s8_x(__VA_ARGS__)
+#define svmin_n_s32_x(...) __builtin_sve_svmin_n_s32_x(__VA_ARGS__)
+#define svmin_n_s64_x(...) __builtin_sve_svmin_n_s64_x(__VA_ARGS__)
+#define svmin_n_s16_x(...) __builtin_sve_svmin_n_s16_x(__VA_ARGS__)
+#define svmin_n_s8_z(...) __builtin_sve_svmin_n_s8_z(__VA_ARGS__)
+#define svmin_n_s32_z(...) __builtin_sve_svmin_n_s32_z(__VA_ARGS__)
+#define svmin_n_s64_z(...) __builtin_sve_svmin_n_s64_z(__VA_ARGS__)
+#define svmin_n_s16_z(...) __builtin_sve_svmin_n_s16_z(__VA_ARGS__)
+#define svmin_n_u8_m(...) __builtin_sve_svmin_n_u8_m(__VA_ARGS__)
+#define svmin_n_u32_m(...) __builtin_sve_svmin_n_u32_m(__VA_ARGS__)
+#define svmin_n_u64_m(...) __builtin_sve_svmin_n_u64_m(__VA_ARGS__)
+#define svmin_n_u16_m(...) __builtin_sve_svmin_n_u16_m(__VA_ARGS__)
+#define svmin_n_u8_x(...) __builtin_sve_svmin_n_u8_x(__VA_ARGS__)
+#define svmin_n_u32_x(...) __builtin_sve_svmin_n_u32_x(__VA_ARGS__)
+#define svmin_n_u64_x(...) __builtin_sve_svmin_n_u64_x(__VA_ARGS__)
+#define svmin_n_u16_x(...) __builtin_sve_svmin_n_u16_x(__VA_ARGS__)
+#define svmin_n_u8_z(...) __builtin_sve_svmin_n_u8_z(__VA_ARGS__)
+#define svmin_n_u32_z(...) __builtin_sve_svmin_n_u32_z(__VA_ARGS__)
+#define svmin_n_u64_z(...) __builtin_sve_svmin_n_u64_z(__VA_ARGS__)
+#define svmin_n_u16_z(...) __builtin_sve_svmin_n_u16_z(__VA_ARGS__)
+#define svmin_f64_m(...) __builtin_sve_svmin_f64_m(__VA_ARGS__)
+#define svmin_f32_m(...) __builtin_sve_svmin_f32_m(__VA_ARGS__)
+#define svmin_f16_m(...) __builtin_sve_svmin_f16_m(__VA_ARGS__)
+#define svmin_f64_x(...) __builtin_sve_svmin_f64_x(__VA_ARGS__)
+#define svmin_f32_x(...) __builtin_sve_svmin_f32_x(__VA_ARGS__)
+#define svmin_f16_x(...) __builtin_sve_svmin_f16_x(__VA_ARGS__)
+#define svmin_f64_z(...) __builtin_sve_svmin_f64_z(__VA_ARGS__)
+#define svmin_f32_z(...) __builtin_sve_svmin_f32_z(__VA_ARGS__)
+#define svmin_f16_z(...) __builtin_sve_svmin_f16_z(__VA_ARGS__)
+#define svmin_s8_m(...) __builtin_sve_svmin_s8_m(__VA_ARGS__)
+#define svmin_s32_m(...) __builtin_sve_svmin_s32_m(__VA_ARGS__)
+#define svmin_s64_m(...) __builtin_sve_svmin_s64_m(__VA_ARGS__)
+#define svmin_s16_m(...) __builtin_sve_svmin_s16_m(__VA_ARGS__)
+#define svmin_s8_x(...) __builtin_sve_svmin_s8_x(__VA_ARGS__)
+#define svmin_s32_x(...) __builtin_sve_svmin_s32_x(__VA_ARGS__)
+#define svmin_s64_x(...) __builtin_sve_svmin_s64_x(__VA_ARGS__)
+#define svmin_s16_x(...) __builtin_sve_svmin_s16_x(__VA_ARGS__)
+#define svmin_s8_z(...) __builtin_sve_svmin_s8_z(__VA_ARGS__)
+#define svmin_s32_z(...) __builtin_sve_svmin_s32_z(__VA_ARGS__)
+#define svmin_s64_z(...) __builtin_sve_svmin_s64_z(__VA_ARGS__)
+#define svmin_s16_z(...) __builtin_sve_svmin_s16_z(__VA_ARGS__)
+#define svmin_u8_m(...) __builtin_sve_svmin_u8_m(__VA_ARGS__)
+#define svmin_u32_m(...) __builtin_sve_svmin_u32_m(__VA_ARGS__)
+#define svmin_u64_m(...) __builtin_sve_svmin_u64_m(__VA_ARGS__)
+#define svmin_u16_m(...) __builtin_sve_svmin_u16_m(__VA_ARGS__)
+#define svmin_u8_x(...) __builtin_sve_svmin_u8_x(__VA_ARGS__)
+#define svmin_u32_x(...) __builtin_sve_svmin_u32_x(__VA_ARGS__)
+#define svmin_u64_x(...) __builtin_sve_svmin_u64_x(__VA_ARGS__)
+#define svmin_u16_x(...) __builtin_sve_svmin_u16_x(__VA_ARGS__)
+#define svmin_u8_z(...) __builtin_sve_svmin_u8_z(__VA_ARGS__)
+#define svmin_u32_z(...) __builtin_sve_svmin_u32_z(__VA_ARGS__)
+#define svmin_u64_z(...) __builtin_sve_svmin_u64_z(__VA_ARGS__)
+#define svmin_u16_z(...) __builtin_sve_svmin_u16_z(__VA_ARGS__)
+#define svminnm_n_f64_m(...) __builtin_sve_svminnm_n_f64_m(__VA_ARGS__)
+#define svminnm_n_f32_m(...) __builtin_sve_svminnm_n_f32_m(__VA_ARGS__)
+#define svminnm_n_f16_m(...) __builtin_sve_svminnm_n_f16_m(__VA_ARGS__)
+#define svminnm_n_f64_x(...) __builtin_sve_svminnm_n_f64_x(__VA_ARGS__)
+#define svminnm_n_f32_x(...) __builtin_sve_svminnm_n_f32_x(__VA_ARGS__)
+#define svminnm_n_f16_x(...) __builtin_sve_svminnm_n_f16_x(__VA_ARGS__)
+#define svminnm_n_f64_z(...) __builtin_sve_svminnm_n_f64_z(__VA_ARGS__)
+#define svminnm_n_f32_z(...) __builtin_sve_svminnm_n_f32_z(__VA_ARGS__)
+#define svminnm_n_f16_z(...) __builtin_sve_svminnm_n_f16_z(__VA_ARGS__)
+#define svminnm_f64_m(...) __builtin_sve_svminnm_f64_m(__VA_ARGS__)
+#define svminnm_f32_m(...) __builtin_sve_svminnm_f32_m(__VA_ARGS__)
+#define svminnm_f16_m(...) __builtin_sve_svminnm_f16_m(__VA_ARGS__)
+#define svminnm_f64_x(...) __builtin_sve_svminnm_f64_x(__VA_ARGS__)
+#define svminnm_f32_x(...) __builtin_sve_svminnm_f32_x(__VA_ARGS__)
+#define svminnm_f16_x(...) __builtin_sve_svminnm_f16_x(__VA_ARGS__)
+#define svminnm_f64_z(...) __builtin_sve_svminnm_f64_z(__VA_ARGS__)
+#define svminnm_f32_z(...) __builtin_sve_svminnm_f32_z(__VA_ARGS__)
+#define svminnm_f16_z(...) __builtin_sve_svminnm_f16_z(__VA_ARGS__)
+#define svminnmv_f64(...) __builtin_sve_svminnmv_f64(__VA_ARGS__)
+#define svminnmv_f32(...) __builtin_sve_svminnmv_f32(__VA_ARGS__)
+#define svminnmv_f16(...) __builtin_sve_svminnmv_f16(__VA_ARGS__)
+#define svminv_f64(...) __builtin_sve_svminv_f64(__VA_ARGS__)
+#define svminv_f32(...) __builtin_sve_svminv_f32(__VA_ARGS__)
+#define svminv_f16(...) __builtin_sve_svminv_f16(__VA_ARGS__)
+#define svminv_s8(...) __builtin_sve_svminv_s8(__VA_ARGS__)
+#define svminv_s32(...) __builtin_sve_svminv_s32(__VA_ARGS__)
+#define svminv_s64(...) __builtin_sve_svminv_s64(__VA_ARGS__)
+#define svminv_s16(...) __builtin_sve_svminv_s16(__VA_ARGS__)
+#define svminv_u8(...) __builtin_sve_svminv_u8(__VA_ARGS__)
+#define svminv_u32(...) __builtin_sve_svminv_u32(__VA_ARGS__)
+#define svminv_u64(...) __builtin_sve_svminv_u64(__VA_ARGS__)
+#define svminv_u16(...) __builtin_sve_svminv_u16(__VA_ARGS__)
+#define svmla_n_f64_m(...) __builtin_sve_svmla_n_f64_m(__VA_ARGS__)
+#define svmla_n_f32_m(...) __builtin_sve_svmla_n_f32_m(__VA_ARGS__)
+#define svmla_n_f16_m(...) __builtin_sve_svmla_n_f16_m(__VA_ARGS__)
+#define svmla_n_f64_x(...) __builtin_sve_svmla_n_f64_x(__VA_ARGS__)
+#define svmla_n_f32_x(...) __builtin_sve_svmla_n_f32_x(__VA_ARGS__)
+#define svmla_n_f16_x(...) __builtin_sve_svmla_n_f16_x(__VA_ARGS__)
+#define svmla_n_f64_z(...) __builtin_sve_svmla_n_f64_z(__VA_ARGS__)
+#define svmla_n_f32_z(...) __builtin_sve_svmla_n_f32_z(__VA_ARGS__)
+#define svmla_n_f16_z(...) __builtin_sve_svmla_n_f16_z(__VA_ARGS__)
+#define svmla_n_u8_m(...) __builtin_sve_svmla_n_u8_m(__VA_ARGS__)
+#define svmla_n_u32_m(...) __builtin_sve_svmla_n_u32_m(__VA_ARGS__)
+#define svmla_n_u64_m(...) __builtin_sve_svmla_n_u64_m(__VA_ARGS__)
+#define svmla_n_u16_m(...) __builtin_sve_svmla_n_u16_m(__VA_ARGS__)
+#define svmla_n_s8_m(...) __builtin_sve_svmla_n_s8_m(__VA_ARGS__)
+#define svmla_n_s32_m(...) __builtin_sve_svmla_n_s32_m(__VA_ARGS__)
+#define svmla_n_s64_m(...) __builtin_sve_svmla_n_s64_m(__VA_ARGS__)
+#define svmla_n_s16_m(...) __builtin_sve_svmla_n_s16_m(__VA_ARGS__)
+#define svmla_n_u8_x(...) __builtin_sve_svmla_n_u8_x(__VA_ARGS__)
+#define svmla_n_u32_x(...) __builtin_sve_svmla_n_u32_x(__VA_ARGS__)
+#define svmla_n_u64_x(...) __builtin_sve_svmla_n_u64_x(__VA_ARGS__)
+#define svmla_n_u16_x(...) __builtin_sve_svmla_n_u16_x(__VA_ARGS__)
+#define svmla_n_s8_x(...) __builtin_sve_svmla_n_s8_x(__VA_ARGS__)
+#define svmla_n_s32_x(...) __builtin_sve_svmla_n_s32_x(__VA_ARGS__)
+#define svmla_n_s64_x(...) __builtin_sve_svmla_n_s64_x(__VA_ARGS__)
+#define svmla_n_s16_x(...) __builtin_sve_svmla_n_s16_x(__VA_ARGS__)
+#define svmla_n_u8_z(...) __builtin_sve_svmla_n_u8_z(__VA_ARGS__)
+#define svmla_n_u32_z(...) __builtin_sve_svmla_n_u32_z(__VA_ARGS__)
+#define svmla_n_u64_z(...) __builtin_sve_svmla_n_u64_z(__VA_ARGS__)
+#define svmla_n_u16_z(...) __builtin_sve_svmla_n_u16_z(__VA_ARGS__)
+#define svmla_n_s8_z(...) __builtin_sve_svmla_n_s8_z(__VA_ARGS__)
+#define svmla_n_s32_z(...) __builtin_sve_svmla_n_s32_z(__VA_ARGS__)
+#define svmla_n_s64_z(...) __builtin_sve_svmla_n_s64_z(__VA_ARGS__)
+#define svmla_n_s16_z(...) __builtin_sve_svmla_n_s16_z(__VA_ARGS__)
+#define svmla_f64_m(...) __builtin_sve_svmla_f64_m(__VA_ARGS__)
+#define svmla_f32_m(...) __builtin_sve_svmla_f32_m(__VA_ARGS__)
+#define svmla_f16_m(...) __builtin_sve_svmla_f16_m(__VA_ARGS__)
+#define svmla_f64_x(...) __builtin_sve_svmla_f64_x(__VA_ARGS__)
+#define svmla_f32_x(...) __builtin_sve_svmla_f32_x(__VA_ARGS__)
+#define svmla_f16_x(...) __builtin_sve_svmla_f16_x(__VA_ARGS__)
+#define svmla_f64_z(...) __builtin_sve_svmla_f64_z(__VA_ARGS__)
+#define svmla_f32_z(...) __builtin_sve_svmla_f32_z(__VA_ARGS__)
+#define svmla_f16_z(...) __builtin_sve_svmla_f16_z(__VA_ARGS__)
+#define svmla_u8_m(...) __builtin_sve_svmla_u8_m(__VA_ARGS__)
+#define svmla_u32_m(...) __builtin_sve_svmla_u32_m(__VA_ARGS__)
+#define svmla_u64_m(...) __builtin_sve_svmla_u64_m(__VA_ARGS__)
+#define svmla_u16_m(...) __builtin_sve_svmla_u16_m(__VA_ARGS__)
+#define svmla_s8_m(...) __builtin_sve_svmla_s8_m(__VA_ARGS__)
+#define svmla_s32_m(...) __builtin_sve_svmla_s32_m(__VA_ARGS__)
+#define svmla_s64_m(...) __builtin_sve_svmla_s64_m(__VA_ARGS__)
+#define svmla_s16_m(...) __builtin_sve_svmla_s16_m(__VA_ARGS__)
+#define svmla_u8_x(...) __builtin_sve_svmla_u8_x(__VA_ARGS__)
+#define svmla_u32_x(...) __builtin_sve_svmla_u32_x(__VA_ARGS__)
+#define svmla_u64_x(...) __builtin_sve_svmla_u64_x(__VA_ARGS__)
+#define svmla_u16_x(...) __builtin_sve_svmla_u16_x(__VA_ARGS__)
+#define svmla_s8_x(...) __builtin_sve_svmla_s8_x(__VA_ARGS__)
+#define svmla_s32_x(...) __builtin_sve_svmla_s32_x(__VA_ARGS__)
+#define svmla_s64_x(...) __builtin_sve_svmla_s64_x(__VA_ARGS__)
+#define svmla_s16_x(...) __builtin_sve_svmla_s16_x(__VA_ARGS__)
+#define svmla_u8_z(...) __builtin_sve_svmla_u8_z(__VA_ARGS__)
+#define svmla_u32_z(...) __builtin_sve_svmla_u32_z(__VA_ARGS__)
+#define svmla_u64_z(...) __builtin_sve_svmla_u64_z(__VA_ARGS__)
+#define svmla_u16_z(...) __builtin_sve_svmla_u16_z(__VA_ARGS__)
+#define svmla_s8_z(...) __builtin_sve_svmla_s8_z(__VA_ARGS__)
+#define svmla_s32_z(...) __builtin_sve_svmla_s32_z(__VA_ARGS__)
+#define svmla_s64_z(...) __builtin_sve_svmla_s64_z(__VA_ARGS__)
+#define svmla_s16_z(...) __builtin_sve_svmla_s16_z(__VA_ARGS__)
+#define svmla_lane_f64(...) __builtin_sve_svmla_lane_f64(__VA_ARGS__)
+#define svmla_lane_f32(...) __builtin_sve_svmla_lane_f32(__VA_ARGS__)
+#define svmla_lane_f16(...) __builtin_sve_svmla_lane_f16(__VA_ARGS__)
+#define svmls_n_f64_m(...) __builtin_sve_svmls_n_f64_m(__VA_ARGS__)
+#define svmls_n_f32_m(...) __builtin_sve_svmls_n_f32_m(__VA_ARGS__)
+#define svmls_n_f16_m(...) __builtin_sve_svmls_n_f16_m(__VA_ARGS__)
+#define svmls_n_f64_x(...) __builtin_sve_svmls_n_f64_x(__VA_ARGS__)
+#define svmls_n_f32_x(...) __builtin_sve_svmls_n_f32_x(__VA_ARGS__)
+#define svmls_n_f16_x(...) __builtin_sve_svmls_n_f16_x(__VA_ARGS__)
+#define svmls_n_f64_z(...) __builtin_sve_svmls_n_f64_z(__VA_ARGS__)
+#define svmls_n_f32_z(...) __builtin_sve_svmls_n_f32_z(__VA_ARGS__)
+#define svmls_n_f16_z(...) __builtin_sve_svmls_n_f16_z(__VA_ARGS__)
+#define svmls_n_u8_m(...) __builtin_sve_svmls_n_u8_m(__VA_ARGS__)
+#define svmls_n_u32_m(...) __builtin_sve_svmls_n_u32_m(__VA_ARGS__)
+#define svmls_n_u64_m(...) __builtin_sve_svmls_n_u64_m(__VA_ARGS__)
+#define svmls_n_u16_m(...) __builtin_sve_svmls_n_u16_m(__VA_ARGS__)
+#define svmls_n_s8_m(...) __builtin_sve_svmls_n_s8_m(__VA_ARGS__)
+#define svmls_n_s32_m(...) __builtin_sve_svmls_n_s32_m(__VA_ARGS__)
+#define svmls_n_s64_m(...) __builtin_sve_svmls_n_s64_m(__VA_ARGS__)
+#define svmls_n_s16_m(...) __builtin_sve_svmls_n_s16_m(__VA_ARGS__)
+#define svmls_n_u8_x(...) __builtin_sve_svmls_n_u8_x(__VA_ARGS__)
+#define svmls_n_u32_x(...) __builtin_sve_svmls_n_u32_x(__VA_ARGS__)
+#define svmls_n_u64_x(...) __builtin_sve_svmls_n_u64_x(__VA_ARGS__)
+#define svmls_n_u16_x(...) __builtin_sve_svmls_n_u16_x(__VA_ARGS__)
+#define svmls_n_s8_x(...) __builtin_sve_svmls_n_s8_x(__VA_ARGS__)
+#define svmls_n_s32_x(...) __builtin_sve_svmls_n_s32_x(__VA_ARGS__)
+#define svmls_n_s64_x(...) __builtin_sve_svmls_n_s64_x(__VA_ARGS__)
+#define svmls_n_s16_x(...) __builtin_sve_svmls_n_s16_x(__VA_ARGS__)
+#define svmls_n_u8_z(...) __builtin_sve_svmls_n_u8_z(__VA_ARGS__)
+#define svmls_n_u32_z(...) __builtin_sve_svmls_n_u32_z(__VA_ARGS__)
+#define svmls_n_u64_z(...) __builtin_sve_svmls_n_u64_z(__VA_ARGS__)
+#define svmls_n_u16_z(...) __builtin_sve_svmls_n_u16_z(__VA_ARGS__)
+#define svmls_n_s8_z(...) __builtin_sve_svmls_n_s8_z(__VA_ARGS__)
+#define svmls_n_s32_z(...) __builtin_sve_svmls_n_s32_z(__VA_ARGS__)
+#define svmls_n_s64_z(...) __builtin_sve_svmls_n_s64_z(__VA_ARGS__)
+#define svmls_n_s16_z(...) __builtin_sve_svmls_n_s16_z(__VA_ARGS__)
+#define svmls_f64_m(...) __builtin_sve_svmls_f64_m(__VA_ARGS__)
+#define svmls_f32_m(...) __builtin_sve_svmls_f32_m(__VA_ARGS__)
+#define svmls_f16_m(...) __builtin_sve_svmls_f16_m(__VA_ARGS__)
+#define svmls_f64_x(...) __builtin_sve_svmls_f64_x(__VA_ARGS__)
+#define svmls_f32_x(...) __builtin_sve_svmls_f32_x(__VA_ARGS__)
+#define svmls_f16_x(...) __builtin_sve_svmls_f16_x(__VA_ARGS__)
+#define svmls_f64_z(...) __builtin_sve_svmls_f64_z(__VA_ARGS__)
+#define svmls_f32_z(...) __builtin_sve_svmls_f32_z(__VA_ARGS__)
+#define svmls_f16_z(...) __builtin_sve_svmls_f16_z(__VA_ARGS__)
+#define svmls_u8_m(...) __builtin_sve_svmls_u8_m(__VA_ARGS__)
+#define svmls_u32_m(...) __builtin_sve_svmls_u32_m(__VA_ARGS__)
+#define svmls_u64_m(...) __builtin_sve_svmls_u64_m(__VA_ARGS__)
+#define svmls_u16_m(...) __builtin_sve_svmls_u16_m(__VA_ARGS__)
+#define svmls_s8_m(...) __builtin_sve_svmls_s8_m(__VA_ARGS__)
+#define svmls_s32_m(...) __builtin_sve_svmls_s32_m(__VA_ARGS__)
+#define svmls_s64_m(...) __builtin_sve_svmls_s64_m(__VA_ARGS__)
+#define svmls_s16_m(...) __builtin_sve_svmls_s16_m(__VA_ARGS__)
+#define svmls_u8_x(...) __builtin_sve_svmls_u8_x(__VA_ARGS__)
+#define svmls_u32_x(...) __builtin_sve_svmls_u32_x(__VA_ARGS__)
+#define svmls_u64_x(...) __builtin_sve_svmls_u64_x(__VA_ARGS__)
+#define svmls_u16_x(...) __builtin_sve_svmls_u16_x(__VA_ARGS__)
+#define svmls_s8_x(...) __builtin_sve_svmls_s8_x(__VA_ARGS__)
+#define svmls_s32_x(...) __builtin_sve_svmls_s32_x(__VA_ARGS__)
+#define svmls_s64_x(...) __builtin_sve_svmls_s64_x(__VA_ARGS__)
+#define svmls_s16_x(...) __builtin_sve_svmls_s16_x(__VA_ARGS__)
+#define svmls_u8_z(...) __builtin_sve_svmls_u8_z(__VA_ARGS__)
+#define svmls_u32_z(...) __builtin_sve_svmls_u32_z(__VA_ARGS__)
+#define svmls_u64_z(...) __builtin_sve_svmls_u64_z(__VA_ARGS__)
+#define svmls_u16_z(...) __builtin_sve_svmls_u16_z(__VA_ARGS__)
+#define svmls_s8_z(...) __builtin_sve_svmls_s8_z(__VA_ARGS__)
+#define svmls_s32_z(...) __builtin_sve_svmls_s32_z(__VA_ARGS__)
+#define svmls_s64_z(...) __builtin_sve_svmls_s64_z(__VA_ARGS__)
+#define svmls_s16_z(...) __builtin_sve_svmls_s16_z(__VA_ARGS__)
+#define svmls_lane_f64(...) __builtin_sve_svmls_lane_f64(__VA_ARGS__)
+#define svmls_lane_f32(...) __builtin_sve_svmls_lane_f32(__VA_ARGS__)
+#define svmls_lane_f16(...) __builtin_sve_svmls_lane_f16(__VA_ARGS__)
+#define svmov_b_z(...) __builtin_sve_svmov_b_z(__VA_ARGS__)
+#define svmsb_n_f64_m(...) __builtin_sve_svmsb_n_f64_m(__VA_ARGS__)
+#define svmsb_n_f32_m(...) __builtin_sve_svmsb_n_f32_m(__VA_ARGS__)
+#define svmsb_n_f16_m(...) __builtin_sve_svmsb_n_f16_m(__VA_ARGS__)
+#define svmsb_n_f64_x(...) __builtin_sve_svmsb_n_f64_x(__VA_ARGS__)
+#define svmsb_n_f32_x(...) __builtin_sve_svmsb_n_f32_x(__VA_ARGS__)
+#define svmsb_n_f16_x(...) __builtin_sve_svmsb_n_f16_x(__VA_ARGS__)
+#define svmsb_n_f64_z(...) __builtin_sve_svmsb_n_f64_z(__VA_ARGS__)
+#define svmsb_n_f32_z(...) __builtin_sve_svmsb_n_f32_z(__VA_ARGS__)
+#define svmsb_n_f16_z(...) __builtin_sve_svmsb_n_f16_z(__VA_ARGS__)
+#define svmsb_n_u8_m(...) __builtin_sve_svmsb_n_u8_m(__VA_ARGS__)
+#define svmsb_n_u32_m(...) __builtin_sve_svmsb_n_u32_m(__VA_ARGS__)
+#define svmsb_n_u64_m(...) __builtin_sve_svmsb_n_u64_m(__VA_ARGS__)
+#define svmsb_n_u16_m(...) __builtin_sve_svmsb_n_u16_m(__VA_ARGS__)
+#define svmsb_n_s8_m(...) __builtin_sve_svmsb_n_s8_m(__VA_ARGS__)
+#define svmsb_n_s32_m(...) __builtin_sve_svmsb_n_s32_m(__VA_ARGS__)
+#define svmsb_n_s64_m(...) __builtin_sve_svmsb_n_s64_m(__VA_ARGS__)
+#define svmsb_n_s16_m(...) __builtin_sve_svmsb_n_s16_m(__VA_ARGS__)
+#define svmsb_n_u8_x(...) __builtin_sve_svmsb_n_u8_x(__VA_ARGS__)
+#define svmsb_n_u32_x(...) __builtin_sve_svmsb_n_u32_x(__VA_ARGS__)
+#define svmsb_n_u64_x(...) __builtin_sve_svmsb_n_u64_x(__VA_ARGS__)
+#define svmsb_n_u16_x(...) __builtin_sve_svmsb_n_u16_x(__VA_ARGS__)
+#define svmsb_n_s8_x(...) __builtin_sve_svmsb_n_s8_x(__VA_ARGS__)
+#define svmsb_n_s32_x(...) __builtin_sve_svmsb_n_s32_x(__VA_ARGS__)
+#define svmsb_n_s64_x(...) __builtin_sve_svmsb_n_s64_x(__VA_ARGS__)
+#define svmsb_n_s16_x(...) __builtin_sve_svmsb_n_s16_x(__VA_ARGS__)
+#define svmsb_n_u8_z(...) __builtin_sve_svmsb_n_u8_z(__VA_ARGS__)
+#define svmsb_n_u32_z(...) __builtin_sve_svmsb_n_u32_z(__VA_ARGS__)
+#define svmsb_n_u64_z(...) __builtin_sve_svmsb_n_u64_z(__VA_ARGS__)
+#define svmsb_n_u16_z(...) __builtin_sve_svmsb_n_u16_z(__VA_ARGS__)
+#define svmsb_n_s8_z(...) __builtin_sve_svmsb_n_s8_z(__VA_ARGS__)
+#define svmsb_n_s32_z(...) __builtin_sve_svmsb_n_s32_z(__VA_ARGS__)
+#define svmsb_n_s64_z(...) __builtin_sve_svmsb_n_s64_z(__VA_ARGS__)
+#define svmsb_n_s16_z(...) __builtin_sve_svmsb_n_s16_z(__VA_ARGS__)
+#define svmsb_f64_m(...) __builtin_sve_svmsb_f64_m(__VA_ARGS__)
+#define svmsb_f32_m(...) __builtin_sve_svmsb_f32_m(__VA_ARGS__)
+#define svmsb_f16_m(...) __builtin_sve_svmsb_f16_m(__VA_ARGS__)
+#define svmsb_f64_x(...) __builtin_sve_svmsb_f64_x(__VA_ARGS__)
+#define svmsb_f32_x(...) __builtin_sve_svmsb_f32_x(__VA_ARGS__)
+#define svmsb_f16_x(...) __builtin_sve_svmsb_f16_x(__VA_ARGS__)
+#define svmsb_f64_z(...) __builtin_sve_svmsb_f64_z(__VA_ARGS__)
+#define svmsb_f32_z(...) __builtin_sve_svmsb_f32_z(__VA_ARGS__)
+#define svmsb_f16_z(...) __builtin_sve_svmsb_f16_z(__VA_ARGS__)
+#define svmsb_u8_m(...) __builtin_sve_svmsb_u8_m(__VA_ARGS__)
+#define svmsb_u32_m(...) __builtin_sve_svmsb_u32_m(__VA_ARGS__)
+#define svmsb_u64_m(...) __builtin_sve_svmsb_u64_m(__VA_ARGS__)
+#define svmsb_u16_m(...) __builtin_sve_svmsb_u16_m(__VA_ARGS__)
+#define svmsb_s8_m(...) __builtin_sve_svmsb_s8_m(__VA_ARGS__)
+#define svmsb_s32_m(...) __builtin_sve_svmsb_s32_m(__VA_ARGS__)
+#define svmsb_s64_m(...) __builtin_sve_svmsb_s64_m(__VA_ARGS__)
+#define svmsb_s16_m(...) __builtin_sve_svmsb_s16_m(__VA_ARGS__)
+#define svmsb_u8_x(...) __builtin_sve_svmsb_u8_x(__VA_ARGS__)
+#define svmsb_u32_x(...) __builtin_sve_svmsb_u32_x(__VA_ARGS__)
+#define svmsb_u64_x(...) __builtin_sve_svmsb_u64_x(__VA_ARGS__)
+#define svmsb_u16_x(...) __builtin_sve_svmsb_u16_x(__VA_ARGS__)
+#define svmsb_s8_x(...) __builtin_sve_svmsb_s8_x(__VA_ARGS__)
+#define svmsb_s32_x(...) __builtin_sve_svmsb_s32_x(__VA_ARGS__)
+#define svmsb_s64_x(...) __builtin_sve_svmsb_s64_x(__VA_ARGS__)
+#define svmsb_s16_x(...) __builtin_sve_svmsb_s16_x(__VA_ARGS__)
+#define svmsb_u8_z(...) __builtin_sve_svmsb_u8_z(__VA_ARGS__)
+#define svmsb_u32_z(...) __builtin_sve_svmsb_u32_z(__VA_ARGS__)
+#define svmsb_u64_z(...) __builtin_sve_svmsb_u64_z(__VA_ARGS__)
+#define svmsb_u16_z(...) __builtin_sve_svmsb_u16_z(__VA_ARGS__)
+#define svmsb_s8_z(...) __builtin_sve_svmsb_s8_z(__VA_ARGS__)
+#define svmsb_s32_z(...) __builtin_sve_svmsb_s32_z(__VA_ARGS__)
+#define svmsb_s64_z(...) __builtin_sve_svmsb_s64_z(__VA_ARGS__)
+#define svmsb_s16_z(...) __builtin_sve_svmsb_s16_z(__VA_ARGS__)
+#define svmul_n_f64_m(...) __builtin_sve_svmul_n_f64_m(__VA_ARGS__)
+#define svmul_n_f32_m(...) __builtin_sve_svmul_n_f32_m(__VA_ARGS__)
+#define svmul_n_f16_m(...) __builtin_sve_svmul_n_f16_m(__VA_ARGS__)
+#define svmul_n_f64_x(...) __builtin_sve_svmul_n_f64_x(__VA_ARGS__)
+#define svmul_n_f32_x(...) __builtin_sve_svmul_n_f32_x(__VA_ARGS__)
+#define svmul_n_f16_x(...) __builtin_sve_svmul_n_f16_x(__VA_ARGS__)
+#define svmul_n_f64_z(...) __builtin_sve_svmul_n_f64_z(__VA_ARGS__)
+#define svmul_n_f32_z(...) __builtin_sve_svmul_n_f32_z(__VA_ARGS__)
+#define svmul_n_f16_z(...) __builtin_sve_svmul_n_f16_z(__VA_ARGS__)
+#define svmul_n_u8_m(...) __builtin_sve_svmul_n_u8_m(__VA_ARGS__)
+#define svmul_n_u32_m(...) __builtin_sve_svmul_n_u32_m(__VA_ARGS__)
+#define svmul_n_u64_m(...) __builtin_sve_svmul_n_u64_m(__VA_ARGS__)
+#define svmul_n_u16_m(...) __builtin_sve_svmul_n_u16_m(__VA_ARGS__)
+#define svmul_n_s8_m(...) __builtin_sve_svmul_n_s8_m(__VA_ARGS__)
+#define svmul_n_s32_m(...) __builtin_sve_svmul_n_s32_m(__VA_ARGS__)
+#define svmul_n_s64_m(...) __builtin_sve_svmul_n_s64_m(__VA_ARGS__)
+#define svmul_n_s16_m(...) __builtin_sve_svmul_n_s16_m(__VA_ARGS__)
+#define svmul_n_u8_x(...) __builtin_sve_svmul_n_u8_x(__VA_ARGS__)
+#define svmul_n_u32_x(...) __builtin_sve_svmul_n_u32_x(__VA_ARGS__)
+#define svmul_n_u64_x(...) __builtin_sve_svmul_n_u64_x(__VA_ARGS__)
+#define svmul_n_u16_x(...) __builtin_sve_svmul_n_u16_x(__VA_ARGS__)
+#define svmul_n_s8_x(...) __builtin_sve_svmul_n_s8_x(__VA_ARGS__)
+#define svmul_n_s32_x(...) __builtin_sve_svmul_n_s32_x(__VA_ARGS__)
+#define svmul_n_s64_x(...) __builtin_sve_svmul_n_s64_x(__VA_ARGS__)
+#define svmul_n_s16_x(...) __builtin_sve_svmul_n_s16_x(__VA_ARGS__)
+#define svmul_n_u8_z(...) __builtin_sve_svmul_n_u8_z(__VA_ARGS__)
+#define svmul_n_u32_z(...) __builtin_sve_svmul_n_u32_z(__VA_ARGS__)
+#define svmul_n_u64_z(...) __builtin_sve_svmul_n_u64_z(__VA_ARGS__)
+#define svmul_n_u16_z(...) __builtin_sve_svmul_n_u16_z(__VA_ARGS__)
+#define svmul_n_s8_z(...) __builtin_sve_svmul_n_s8_z(__VA_ARGS__)
+#define svmul_n_s32_z(...) __builtin_sve_svmul_n_s32_z(__VA_ARGS__)
+#define svmul_n_s64_z(...) __builtin_sve_svmul_n_s64_z(__VA_ARGS__)
+#define svmul_n_s16_z(...) __builtin_sve_svmul_n_s16_z(__VA_ARGS__)
+#define svmul_f64_m(...) __builtin_sve_svmul_f64_m(__VA_ARGS__)
+#define svmul_f32_m(...) __builtin_sve_svmul_f32_m(__VA_ARGS__)
+#define svmul_f16_m(...) __builtin_sve_svmul_f16_m(__VA_ARGS__)
+#define svmul_f64_x(...) __builtin_sve_svmul_f64_x(__VA_ARGS__)
+#define svmul_f32_x(...) __builtin_sve_svmul_f32_x(__VA_ARGS__)
+#define svmul_f16_x(...) __builtin_sve_svmul_f16_x(__VA_ARGS__)
+#define svmul_f64_z(...) __builtin_sve_svmul_f64_z(__VA_ARGS__)
+#define svmul_f32_z(...) __builtin_sve_svmul_f32_z(__VA_ARGS__)
+#define svmul_f16_z(...) __builtin_sve_svmul_f16_z(__VA_ARGS__)
+#define svmul_u8_m(...) __builtin_sve_svmul_u8_m(__VA_ARGS__)
+#define svmul_u32_m(...) __builtin_sve_svmul_u32_m(__VA_ARGS__)
+#define svmul_u64_m(...) __builtin_sve_svmul_u64_m(__VA_ARGS__)
+#define svmul_u16_m(...) __builtin_sve_svmul_u16_m(__VA_ARGS__)
+#define svmul_s8_m(...) __builtin_sve_svmul_s8_m(__VA_ARGS__)
+#define svmul_s32_m(...) __builtin_sve_svmul_s32_m(__VA_ARGS__)
+#define svmul_s64_m(...) __builtin_sve_svmul_s64_m(__VA_ARGS__)
+#define svmul_s16_m(...) __builtin_sve_svmul_s16_m(__VA_ARGS__)
+#define svmul_u8_x(...) __builtin_sve_svmul_u8_x(__VA_ARGS__)
+#define svmul_u32_x(...) __builtin_sve_svmul_u32_x(__VA_ARGS__)
+#define svmul_u64_x(...) __builtin_sve_svmul_u64_x(__VA_ARGS__)
+#define svmul_u16_x(...) __builtin_sve_svmul_u16_x(__VA_ARGS__)
+#define svmul_s8_x(...) __builtin_sve_svmul_s8_x(__VA_ARGS__)
+#define svmul_s32_x(...) __builtin_sve_svmul_s32_x(__VA_ARGS__)
+#define svmul_s64_x(...) __builtin_sve_svmul_s64_x(__VA_ARGS__)
+#define svmul_s16_x(...) __builtin_sve_svmul_s16_x(__VA_ARGS__)
+#define svmul_u8_z(...) __builtin_sve_svmul_u8_z(__VA_ARGS__)
+#define svmul_u32_z(...) __builtin_sve_svmul_u32_z(__VA_ARGS__)
+#define svmul_u64_z(...) __builtin_sve_svmul_u64_z(__VA_ARGS__)
+#define svmul_u16_z(...) __builtin_sve_svmul_u16_z(__VA_ARGS__)
+#define svmul_s8_z(...) __builtin_sve_svmul_s8_z(__VA_ARGS__)
+#define svmul_s32_z(...) __builtin_sve_svmul_s32_z(__VA_ARGS__)
+#define svmul_s64_z(...) __builtin_sve_svmul_s64_z(__VA_ARGS__)
+#define svmul_s16_z(...) __builtin_sve_svmul_s16_z(__VA_ARGS__)
+#define svmul_lane_f64(...) __builtin_sve_svmul_lane_f64(__VA_ARGS__)
+#define svmul_lane_f32(...) __builtin_sve_svmul_lane_f32(__VA_ARGS__)
+#define svmul_lane_f16(...) __builtin_sve_svmul_lane_f16(__VA_ARGS__)
+#define svmulh_n_s8_m(...) __builtin_sve_svmulh_n_s8_m(__VA_ARGS__)
+#define svmulh_n_s32_m(...) __builtin_sve_svmulh_n_s32_m(__VA_ARGS__)
+#define svmulh_n_s64_m(...) __builtin_sve_svmulh_n_s64_m(__VA_ARGS__)
+#define svmulh_n_s16_m(...) __builtin_sve_svmulh_n_s16_m(__VA_ARGS__)
+#define svmulh_n_s8_x(...) __builtin_sve_svmulh_n_s8_x(__VA_ARGS__)
+#define svmulh_n_s32_x(...) __builtin_sve_svmulh_n_s32_x(__VA_ARGS__)
+#define svmulh_n_s64_x(...) __builtin_sve_svmulh_n_s64_x(__VA_ARGS__)
+#define svmulh_n_s16_x(...) __builtin_sve_svmulh_n_s16_x(__VA_ARGS__)
+#define svmulh_n_s8_z(...) __builtin_sve_svmulh_n_s8_z(__VA_ARGS__)
+#define svmulh_n_s32_z(...) __builtin_sve_svmulh_n_s32_z(__VA_ARGS__)
+#define svmulh_n_s64_z(...) __builtin_sve_svmulh_n_s64_z(__VA_ARGS__)
+#define svmulh_n_s16_z(...) __builtin_sve_svmulh_n_s16_z(__VA_ARGS__)
+#define svmulh_n_u8_m(...) __builtin_sve_svmulh_n_u8_m(__VA_ARGS__)
+#define svmulh_n_u32_m(...) __builtin_sve_svmulh_n_u32_m(__VA_ARGS__)
+#define svmulh_n_u64_m(...) __builtin_sve_svmulh_n_u64_m(__VA_ARGS__)
+#define svmulh_n_u16_m(...) __builtin_sve_svmulh_n_u16_m(__VA_ARGS__)
+#define svmulh_n_u8_x(...) __builtin_sve_svmulh_n_u8_x(__VA_ARGS__)
+#define svmulh_n_u32_x(...) __builtin_sve_svmulh_n_u32_x(__VA_ARGS__)
+#define svmulh_n_u64_x(...) __builtin_sve_svmulh_n_u64_x(__VA_ARGS__)
+#define svmulh_n_u16_x(...) __builtin_sve_svmulh_n_u16_x(__VA_ARGS__)
+#define svmulh_n_u8_z(...) __builtin_sve_svmulh_n_u8_z(__VA_ARGS__)
+#define svmulh_n_u32_z(...) __builtin_sve_svmulh_n_u32_z(__VA_ARGS__)
+#define svmulh_n_u64_z(...) __builtin_sve_svmulh_n_u64_z(__VA_ARGS__)
+#define svmulh_n_u16_z(...) __builtin_sve_svmulh_n_u16_z(__VA_ARGS__)
+#define svmulh_s8_m(...) __builtin_sve_svmulh_s8_m(__VA_ARGS__)
+#define svmulh_s32_m(...) __builtin_sve_svmulh_s32_m(__VA_ARGS__)
+#define svmulh_s64_m(...) __builtin_sve_svmulh_s64_m(__VA_ARGS__)
+#define svmulh_s16_m(...) __builtin_sve_svmulh_s16_m(__VA_ARGS__)
+#define svmulh_s8_x(...) __builtin_sve_svmulh_s8_x(__VA_ARGS__)
+#define svmulh_s32_x(...) __builtin_sve_svmulh_s32_x(__VA_ARGS__)
+#define svmulh_s64_x(...) __builtin_sve_svmulh_s64_x(__VA_ARGS__)
+#define svmulh_s16_x(...) __builtin_sve_svmulh_s16_x(__VA_ARGS__)
+#define svmulh_s8_z(...) __builtin_sve_svmulh_s8_z(__VA_ARGS__)
+#define svmulh_s32_z(...) __builtin_sve_svmulh_s32_z(__VA_ARGS__)
+#define svmulh_s64_z(...) __builtin_sve_svmulh_s64_z(__VA_ARGS__)
+#define svmulh_s16_z(...) __builtin_sve_svmulh_s16_z(__VA_ARGS__)
+#define svmulh_u8_m(...) __builtin_sve_svmulh_u8_m(__VA_ARGS__)
+#define svmulh_u32_m(...) __builtin_sve_svmulh_u32_m(__VA_ARGS__)
+#define svmulh_u64_m(...) __builtin_sve_svmulh_u64_m(__VA_ARGS__)
+#define svmulh_u16_m(...) __builtin_sve_svmulh_u16_m(__VA_ARGS__)
+#define svmulh_u8_x(...) __builtin_sve_svmulh_u8_x(__VA_ARGS__)
+#define svmulh_u32_x(...) __builtin_sve_svmulh_u32_x(__VA_ARGS__)
+#define svmulh_u64_x(...) __builtin_sve_svmulh_u64_x(__VA_ARGS__)
+#define svmulh_u16_x(...) __builtin_sve_svmulh_u16_x(__VA_ARGS__)
+#define svmulh_u8_z(...) __builtin_sve_svmulh_u8_z(__VA_ARGS__)
+#define svmulh_u32_z(...) __builtin_sve_svmulh_u32_z(__VA_ARGS__)
+#define svmulh_u64_z(...) __builtin_sve_svmulh_u64_z(__VA_ARGS__)
+#define svmulh_u16_z(...) __builtin_sve_svmulh_u16_z(__VA_ARGS__)
+#define svmulx_n_f64_m(...) __builtin_sve_svmulx_n_f64_m(__VA_ARGS__)
+#define svmulx_n_f32_m(...) __builtin_sve_svmulx_n_f32_m(__VA_ARGS__)
+#define svmulx_n_f16_m(...) __builtin_sve_svmulx_n_f16_m(__VA_ARGS__)
+#define svmulx_n_f64_x(...) __builtin_sve_svmulx_n_f64_x(__VA_ARGS__)
+#define svmulx_n_f32_x(...) __builtin_sve_svmulx_n_f32_x(__VA_ARGS__)
+#define svmulx_n_f16_x(...) __builtin_sve_svmulx_n_f16_x(__VA_ARGS__)
+#define svmulx_n_f64_z(...) __builtin_sve_svmulx_n_f64_z(__VA_ARGS__)
+#define svmulx_n_f32_z(...) __builtin_sve_svmulx_n_f32_z(__VA_ARGS__)
+#define svmulx_n_f16_z(...) __builtin_sve_svmulx_n_f16_z(__VA_ARGS__)
+#define svmulx_f64_m(...) __builtin_sve_svmulx_f64_m(__VA_ARGS__)
+#define svmulx_f32_m(...) __builtin_sve_svmulx_f32_m(__VA_ARGS__)
+#define svmulx_f16_m(...) __builtin_sve_svmulx_f16_m(__VA_ARGS__)
+#define svmulx_f64_x(...) __builtin_sve_svmulx_f64_x(__VA_ARGS__)
+#define svmulx_f32_x(...) __builtin_sve_svmulx_f32_x(__VA_ARGS__)
+#define svmulx_f16_x(...) __builtin_sve_svmulx_f16_x(__VA_ARGS__)
+#define svmulx_f64_z(...) __builtin_sve_svmulx_f64_z(__VA_ARGS__)
+#define svmulx_f32_z(...) __builtin_sve_svmulx_f32_z(__VA_ARGS__)
+#define svmulx_f16_z(...) __builtin_sve_svmulx_f16_z(__VA_ARGS__)
+#define svnand_b_z(...) __builtin_sve_svnand_b_z(__VA_ARGS__)
+#define svneg_f64_m(...) __builtin_sve_svneg_f64_m(__VA_ARGS__)
+#define svneg_f32_m(...) __builtin_sve_svneg_f32_m(__VA_ARGS__)
+#define svneg_f16_m(...) __builtin_sve_svneg_f16_m(__VA_ARGS__)
+#define svneg_f64_x(...) __builtin_sve_svneg_f64_x(__VA_ARGS__)
+#define svneg_f32_x(...) __builtin_sve_svneg_f32_x(__VA_ARGS__)
+#define svneg_f16_x(...) __builtin_sve_svneg_f16_x(__VA_ARGS__)
+#define svneg_f64_z(...) __builtin_sve_svneg_f64_z(__VA_ARGS__)
+#define svneg_f32_z(...) __builtin_sve_svneg_f32_z(__VA_ARGS__)
+#define svneg_f16_z(...) __builtin_sve_svneg_f16_z(__VA_ARGS__)
+#define svneg_s8_m(...) __builtin_sve_svneg_s8_m(__VA_ARGS__)
+#define svneg_s32_m(...) __builtin_sve_svneg_s32_m(__VA_ARGS__)
+#define svneg_s64_m(...) __builtin_sve_svneg_s64_m(__VA_ARGS__)
+#define svneg_s16_m(...) __builtin_sve_svneg_s16_m(__VA_ARGS__)
+#define svneg_s8_x(...) __builtin_sve_svneg_s8_x(__VA_ARGS__)
+#define svneg_s32_x(...) __builtin_sve_svneg_s32_x(__VA_ARGS__)
+#define svneg_s64_x(...) __builtin_sve_svneg_s64_x(__VA_ARGS__)
+#define svneg_s16_x(...) __builtin_sve_svneg_s16_x(__VA_ARGS__)
+#define svneg_s8_z(...) __builtin_sve_svneg_s8_z(__VA_ARGS__)
+#define svneg_s32_z(...) __builtin_sve_svneg_s32_z(__VA_ARGS__)
+#define svneg_s64_z(...) __builtin_sve_svneg_s64_z(__VA_ARGS__)
+#define svneg_s16_z(...) __builtin_sve_svneg_s16_z(__VA_ARGS__)
+#define svnmad_n_f64_m(...) __builtin_sve_svnmad_n_f64_m(__VA_ARGS__)
+#define svnmad_n_f32_m(...) __builtin_sve_svnmad_n_f32_m(__VA_ARGS__)
+#define svnmad_n_f16_m(...) __builtin_sve_svnmad_n_f16_m(__VA_ARGS__)
+#define svnmad_n_f64_x(...) __builtin_sve_svnmad_n_f64_x(__VA_ARGS__)
+#define svnmad_n_f32_x(...) __builtin_sve_svnmad_n_f32_x(__VA_ARGS__)
+#define svnmad_n_f16_x(...) __builtin_sve_svnmad_n_f16_x(__VA_ARGS__)
+#define svnmad_n_f64_z(...) __builtin_sve_svnmad_n_f64_z(__VA_ARGS__)
+#define svnmad_n_f32_z(...) __builtin_sve_svnmad_n_f32_z(__VA_ARGS__)
+#define svnmad_n_f16_z(...) __builtin_sve_svnmad_n_f16_z(__VA_ARGS__)
+#define svnmad_f64_m(...) __builtin_sve_svnmad_f64_m(__VA_ARGS__)
+#define svnmad_f32_m(...) __builtin_sve_svnmad_f32_m(__VA_ARGS__)
+#define svnmad_f16_m(...) __builtin_sve_svnmad_f16_m(__VA_ARGS__)
+#define svnmad_f64_x(...) __builtin_sve_svnmad_f64_x(__VA_ARGS__)
+#define svnmad_f32_x(...) __builtin_sve_svnmad_f32_x(__VA_ARGS__)
+#define svnmad_f16_x(...) __builtin_sve_svnmad_f16_x(__VA_ARGS__)
+#define svnmad_f64_z(...) __builtin_sve_svnmad_f64_z(__VA_ARGS__)
+#define svnmad_f32_z(...) __builtin_sve_svnmad_f32_z(__VA_ARGS__)
+#define svnmad_f16_z(...) __builtin_sve_svnmad_f16_z(__VA_ARGS__)
+#define svnmla_n_f64_m(...) __builtin_sve_svnmla_n_f64_m(__VA_ARGS__)
+#define svnmla_n_f32_m(...) __builtin_sve_svnmla_n_f32_m(__VA_ARGS__)
+#define svnmla_n_f16_m(...) __builtin_sve_svnmla_n_f16_m(__VA_ARGS__)
+#define svnmla_n_f64_x(...) __builtin_sve_svnmla_n_f64_x(__VA_ARGS__)
+#define svnmla_n_f32_x(...) __builtin_sve_svnmla_n_f32_x(__VA_ARGS__)
+#define svnmla_n_f16_x(...) __builtin_sve_svnmla_n_f16_x(__VA_ARGS__)
+#define svnmla_n_f64_z(...) __builtin_sve_svnmla_n_f64_z(__VA_ARGS__)
+#define svnmla_n_f32_z(...) __builtin_sve_svnmla_n_f32_z(__VA_ARGS__)
+#define svnmla_n_f16_z(...) __builtin_sve_svnmla_n_f16_z(__VA_ARGS__)
+#define svnmla_f64_m(...) __builtin_sve_svnmla_f64_m(__VA_ARGS__)
+#define svnmla_f32_m(...) __builtin_sve_svnmla_f32_m(__VA_ARGS__)
+#define svnmla_f16_m(...) __builtin_sve_svnmla_f16_m(__VA_ARGS__)
+#define svnmla_f64_x(...) __builtin_sve_svnmla_f64_x(__VA_ARGS__)
+#define svnmla_f32_x(...) __builtin_sve_svnmla_f32_x(__VA_ARGS__)
+#define svnmla_f16_x(...) __builtin_sve_svnmla_f16_x(__VA_ARGS__)
+#define svnmla_f64_z(...) __builtin_sve_svnmla_f64_z(__VA_ARGS__)
+#define svnmla_f32_z(...) __builtin_sve_svnmla_f32_z(__VA_ARGS__)
+#define svnmla_f16_z(...) __builtin_sve_svnmla_f16_z(__VA_ARGS__)
+#define svnmls_n_f64_m(...) __builtin_sve_svnmls_n_f64_m(__VA_ARGS__)
+#define svnmls_n_f32_m(...) __builtin_sve_svnmls_n_f32_m(__VA_ARGS__)
+#define svnmls_n_f16_m(...) __builtin_sve_svnmls_n_f16_m(__VA_ARGS__)
+#define svnmls_n_f64_x(...) __builtin_sve_svnmls_n_f64_x(__VA_ARGS__)
+#define svnmls_n_f32_x(...) __builtin_sve_svnmls_n_f32_x(__VA_ARGS__)
+#define svnmls_n_f16_x(...) __builtin_sve_svnmls_n_f16_x(__VA_ARGS__)
+#define svnmls_n_f64_z(...) __builtin_sve_svnmls_n_f64_z(__VA_ARGS__)
+#define svnmls_n_f32_z(...) __builtin_sve_svnmls_n_f32_z(__VA_ARGS__)
+#define svnmls_n_f16_z(...) __builtin_sve_svnmls_n_f16_z(__VA_ARGS__)
+#define svnmls_f64_m(...) __builtin_sve_svnmls_f64_m(__VA_ARGS__)
+#define svnmls_f32_m(...) __builtin_sve_svnmls_f32_m(__VA_ARGS__)
+#define svnmls_f16_m(...) __builtin_sve_svnmls_f16_m(__VA_ARGS__)
+#define svnmls_f64_x(...) __builtin_sve_svnmls_f64_x(__VA_ARGS__)
+#define svnmls_f32_x(...) __builtin_sve_svnmls_f32_x(__VA_ARGS__)
+#define svnmls_f16_x(...) __builtin_sve_svnmls_f16_x(__VA_ARGS__)
+#define svnmls_f64_z(...) __builtin_sve_svnmls_f64_z(__VA_ARGS__)
+#define svnmls_f32_z(...) __builtin_sve_svnmls_f32_z(__VA_ARGS__)
+#define svnmls_f16_z(...) __builtin_sve_svnmls_f16_z(__VA_ARGS__)
+#define svnmsb_n_f64_m(...) __builtin_sve_svnmsb_n_f64_m(__VA_ARGS__)
+#define svnmsb_n_f32_m(...) __builtin_sve_svnmsb_n_f32_m(__VA_ARGS__)
+#define svnmsb_n_f16_m(...) __builtin_sve_svnmsb_n_f16_m(__VA_ARGS__)
+#define svnmsb_n_f64_x(...) __builtin_sve_svnmsb_n_f64_x(__VA_ARGS__)
+#define svnmsb_n_f32_x(...) __builtin_sve_svnmsb_n_f32_x(__VA_ARGS__)
+#define svnmsb_n_f16_x(...) __builtin_sve_svnmsb_n_f16_x(__VA_ARGS__)
+#define svnmsb_n_f64_z(...) __builtin_sve_svnmsb_n_f64_z(__VA_ARGS__)
+#define svnmsb_n_f32_z(...) __builtin_sve_svnmsb_n_f32_z(__VA_ARGS__)
+#define svnmsb_n_f16_z(...) __builtin_sve_svnmsb_n_f16_z(__VA_ARGS__)
+#define svnmsb_f64_m(...) __builtin_sve_svnmsb_f64_m(__VA_ARGS__)
+#define svnmsb_f32_m(...) __builtin_sve_svnmsb_f32_m(__VA_ARGS__)
+#define svnmsb_f16_m(...) __builtin_sve_svnmsb_f16_m(__VA_ARGS__)
+#define svnmsb_f64_x(...) __builtin_sve_svnmsb_f64_x(__VA_ARGS__)
+#define svnmsb_f32_x(...) __builtin_sve_svnmsb_f32_x(__VA_ARGS__)
+#define svnmsb_f16_x(...) __builtin_sve_svnmsb_f16_x(__VA_ARGS__)
+#define svnmsb_f64_z(...) __builtin_sve_svnmsb_f64_z(__VA_ARGS__)
+#define svnmsb_f32_z(...) __builtin_sve_svnmsb_f32_z(__VA_ARGS__)
+#define svnmsb_f16_z(...) __builtin_sve_svnmsb_f16_z(__VA_ARGS__)
+#define svnor_b_z(...) __builtin_sve_svnor_b_z(__VA_ARGS__)
+#define svnot_b_z(...) __builtin_sve_svnot_b_z(__VA_ARGS__)
+#define svnot_u8_m(...) __builtin_sve_svnot_u8_m(__VA_ARGS__)
+#define svnot_u32_m(...) __builtin_sve_svnot_u32_m(__VA_ARGS__)
+#define svnot_u64_m(...) __builtin_sve_svnot_u64_m(__VA_ARGS__)
+#define svnot_u16_m(...) __builtin_sve_svnot_u16_m(__VA_ARGS__)
+#define svnot_s8_m(...) __builtin_sve_svnot_s8_m(__VA_ARGS__)
+#define svnot_s32_m(...) __builtin_sve_svnot_s32_m(__VA_ARGS__)
+#define svnot_s64_m(...) __builtin_sve_svnot_s64_m(__VA_ARGS__)
+#define svnot_s16_m(...) __builtin_sve_svnot_s16_m(__VA_ARGS__)
+#define svnot_u8_x(...) __builtin_sve_svnot_u8_x(__VA_ARGS__)
+#define svnot_u32_x(...) __builtin_sve_svnot_u32_x(__VA_ARGS__)
+#define svnot_u64_x(...) __builtin_sve_svnot_u64_x(__VA_ARGS__)
+#define svnot_u16_x(...) __builtin_sve_svnot_u16_x(__VA_ARGS__)
+#define svnot_s8_x(...) __builtin_sve_svnot_s8_x(__VA_ARGS__)
+#define svnot_s32_x(...) __builtin_sve_svnot_s32_x(__VA_ARGS__)
+#define svnot_s64_x(...) __builtin_sve_svnot_s64_x(__VA_ARGS__)
+#define svnot_s16_x(...) __builtin_sve_svnot_s16_x(__VA_ARGS__)
+#define svnot_u8_z(...) __builtin_sve_svnot_u8_z(__VA_ARGS__)
+#define svnot_u32_z(...) __builtin_sve_svnot_u32_z(__VA_ARGS__)
+#define svnot_u64_z(...) __builtin_sve_svnot_u64_z(__VA_ARGS__)
+#define svnot_u16_z(...) __builtin_sve_svnot_u16_z(__VA_ARGS__)
+#define svnot_s8_z(...) __builtin_sve_svnot_s8_z(__VA_ARGS__)
+#define svnot_s32_z(...) __builtin_sve_svnot_s32_z(__VA_ARGS__)
+#define svnot_s64_z(...) __builtin_sve_svnot_s64_z(__VA_ARGS__)
+#define svnot_s16_z(...) __builtin_sve_svnot_s16_z(__VA_ARGS__)
+#define svorn_b_z(...) __builtin_sve_svorn_b_z(__VA_ARGS__)
+#define svorr_b_z(...) __builtin_sve_svorr_b_z(__VA_ARGS__)
+#define svorr_n_u8_m(...) __builtin_sve_svorr_n_u8_m(__VA_ARGS__)
+#define svorr_n_u32_m(...) __builtin_sve_svorr_n_u32_m(__VA_ARGS__)
+#define svorr_n_u64_m(...) __builtin_sve_svorr_n_u64_m(__VA_ARGS__)
+#define svorr_n_u16_m(...) __builtin_sve_svorr_n_u16_m(__VA_ARGS__)
+#define svorr_n_s8_m(...) __builtin_sve_svorr_n_s8_m(__VA_ARGS__)
+#define svorr_n_s32_m(...) __builtin_sve_svorr_n_s32_m(__VA_ARGS__)
+#define svorr_n_s64_m(...) __builtin_sve_svorr_n_s64_m(__VA_ARGS__)
+#define svorr_n_s16_m(...) __builtin_sve_svorr_n_s16_m(__VA_ARGS__)
+#define svorr_n_u8_x(...) __builtin_sve_svorr_n_u8_x(__VA_ARGS__)
+#define svorr_n_u32_x(...) __builtin_sve_svorr_n_u32_x(__VA_ARGS__)
+#define svorr_n_u64_x(...) __builtin_sve_svorr_n_u64_x(__VA_ARGS__)
+#define svorr_n_u16_x(...) __builtin_sve_svorr_n_u16_x(__VA_ARGS__)
+#define svorr_n_s8_x(...) __builtin_sve_svorr_n_s8_x(__VA_ARGS__)
+#define svorr_n_s32_x(...) __builtin_sve_svorr_n_s32_x(__VA_ARGS__)
+#define svorr_n_s64_x(...) __builtin_sve_svorr_n_s64_x(__VA_ARGS__)
+#define svorr_n_s16_x(...) __builtin_sve_svorr_n_s16_x(__VA_ARGS__)
+#define svorr_n_u8_z(...) __builtin_sve_svorr_n_u8_z(__VA_ARGS__)
+#define svorr_n_u32_z(...) __builtin_sve_svorr_n_u32_z(__VA_ARGS__)
+#define svorr_n_u64_z(...) __builtin_sve_svorr_n_u64_z(__VA_ARGS__)
+#define svorr_n_u16_z(...) __builtin_sve_svorr_n_u16_z(__VA_ARGS__)
+#define svorr_n_s8_z(...) __builtin_sve_svorr_n_s8_z(__VA_ARGS__)
+#define svorr_n_s32_z(...) __builtin_sve_svorr_n_s32_z(__VA_ARGS__)
+#define svorr_n_s64_z(...) __builtin_sve_svorr_n_s64_z(__VA_ARGS__)
+#define svorr_n_s16_z(...) __builtin_sve_svorr_n_s16_z(__VA_ARGS__)
+#define svorr_u8_m(...) __builtin_sve_svorr_u8_m(__VA_ARGS__)
+#define svorr_u32_m(...) __builtin_sve_svorr_u32_m(__VA_ARGS__)
+#define svorr_u64_m(...) __builtin_sve_svorr_u64_m(__VA_ARGS__)
+#define svorr_u16_m(...) __builtin_sve_svorr_u16_m(__VA_ARGS__)
+#define svorr_s8_m(...) __builtin_sve_svorr_s8_m(__VA_ARGS__)
+#define svorr_s32_m(...) __builtin_sve_svorr_s32_m(__VA_ARGS__)
+#define svorr_s64_m(...) __builtin_sve_svorr_s64_m(__VA_ARGS__)
+#define svorr_s16_m(...) __builtin_sve_svorr_s16_m(__VA_ARGS__)
+#define svorr_u8_x(...) __builtin_sve_svorr_u8_x(__VA_ARGS__)
+#define svorr_u32_x(...) __builtin_sve_svorr_u32_x(__VA_ARGS__)
+#define svorr_u64_x(...) __builtin_sve_svorr_u64_x(__VA_ARGS__)
+#define svorr_u16_x(...) __builtin_sve_svorr_u16_x(__VA_ARGS__)
+#define svorr_s8_x(...) __builtin_sve_svorr_s8_x(__VA_ARGS__)
+#define svorr_s32_x(...) __builtin_sve_svorr_s32_x(__VA_ARGS__)
+#define svorr_s64_x(...) __builtin_sve_svorr_s64_x(__VA_ARGS__)
+#define svorr_s16_x(...) __builtin_sve_svorr_s16_x(__VA_ARGS__)
+#define svorr_u8_z(...) __builtin_sve_svorr_u8_z(__VA_ARGS__)
+#define svorr_u32_z(...) __builtin_sve_svorr_u32_z(__VA_ARGS__)
+#define svorr_u64_z(...) __builtin_sve_svorr_u64_z(__VA_ARGS__)
+#define svorr_u16_z(...) __builtin_sve_svorr_u16_z(__VA_ARGS__)
+#define svorr_s8_z(...) __builtin_sve_svorr_s8_z(__VA_ARGS__)
+#define svorr_s32_z(...) __builtin_sve_svorr_s32_z(__VA_ARGS__)
+#define svorr_s64_z(...) __builtin_sve_svorr_s64_z(__VA_ARGS__)
+#define svorr_s16_z(...) __builtin_sve_svorr_s16_z(__VA_ARGS__)
+#define svorv_u8(...) __builtin_sve_svorv_u8(__VA_ARGS__)
+#define svorv_u32(...) __builtin_sve_svorv_u32(__VA_ARGS__)
+#define svorv_u64(...) __builtin_sve_svorv_u64(__VA_ARGS__)
+#define svorv_u16(...) __builtin_sve_svorv_u16(__VA_ARGS__)
+#define svorv_s8(...) __builtin_sve_svorv_s8(__VA_ARGS__)
+#define svorv_s32(...) __builtin_sve_svorv_s32(__VA_ARGS__)
+#define svorv_s64(...) __builtin_sve_svorv_s64(__VA_ARGS__)
+#define svorv_s16(...) __builtin_sve_svorv_s16(__VA_ARGS__)
+#define svpfalse_b(...) __builtin_sve_svpfalse_b(__VA_ARGS__)
+#define svpfirst_b(...) __builtin_sve_svpfirst_b(__VA_ARGS__)
+#define svpnext_b8(...) __builtin_sve_svpnext_b8(__VA_ARGS__)
+#define svpnext_b32(...) __builtin_sve_svpnext_b32(__VA_ARGS__)
+#define svpnext_b64(...) __builtin_sve_svpnext_b64(__VA_ARGS__)
+#define svpnext_b16(...) __builtin_sve_svpnext_b16(__VA_ARGS__)
+#define svprfb(...) __builtin_sve_svprfb(__VA_ARGS__)
+#define svprfb_gather_u32base(...) __builtin_sve_svprfb_gather_u32base(__VA_ARGS__)
+#define svprfb_gather_u64base(...) __builtin_sve_svprfb_gather_u64base(__VA_ARGS__)
+#define svprfb_gather_u32base_offset(...) __builtin_sve_svprfb_gather_u32base_offset(__VA_ARGS__)
+#define svprfb_gather_u64base_offset(...) __builtin_sve_svprfb_gather_u64base_offset(__VA_ARGS__)
+#define svprfb_gather_s32offset(...) __builtin_sve_svprfb_gather_s32offset(__VA_ARGS__)
+#define svprfb_gather_u32offset(...) __builtin_sve_svprfb_gather_u32offset(__VA_ARGS__)
+#define svprfb_gather_s64offset(...) __builtin_sve_svprfb_gather_s64offset(__VA_ARGS__)
+#define svprfb_gather_u64offset(...) __builtin_sve_svprfb_gather_u64offset(__VA_ARGS__)
+#define svprfb_vnum(...) __builtin_sve_svprfb_vnum(__VA_ARGS__)
+#define svprfd(...) __builtin_sve_svprfd(__VA_ARGS__)
+#define svprfd_gather_u32base(...) __builtin_sve_svprfd_gather_u32base(__VA_ARGS__)
+#define svprfd_gather_u64base(...) __builtin_sve_svprfd_gather_u64base(__VA_ARGS__)
+#define svprfd_gather_u32base_index(...) __builtin_sve_svprfd_gather_u32base_index(__VA_ARGS__)
+#define svprfd_gather_u64base_index(...) __builtin_sve_svprfd_gather_u64base_index(__VA_ARGS__)
+#define svprfd_gather_s32index(...) __builtin_sve_svprfd_gather_s32index(__VA_ARGS__)
+#define svprfd_gather_u32index(...) __builtin_sve_svprfd_gather_u32index(__VA_ARGS__)
+#define svprfd_gather_s64index(...) __builtin_sve_svprfd_gather_s64index(__VA_ARGS__)
+#define svprfd_gather_u64index(...) __builtin_sve_svprfd_gather_u64index(__VA_ARGS__)
+#define svprfd_vnum(...) __builtin_sve_svprfd_vnum(__VA_ARGS__)
+#define svprfh(...) __builtin_sve_svprfh(__VA_ARGS__)
+#define svprfh_gather_u32base(...) __builtin_sve_svprfh_gather_u32base(__VA_ARGS__)
+#define svprfh_gather_u64base(...) __builtin_sve_svprfh_gather_u64base(__VA_ARGS__)
+#define svprfh_gather_u32base_index(...) __builtin_sve_svprfh_gather_u32base_index(__VA_ARGS__)
+#define svprfh_gather_u64base_index(...) __builtin_sve_svprfh_gather_u64base_index(__VA_ARGS__)
+#define svprfh_gather_s32index(...) __builtin_sve_svprfh_gather_s32index(__VA_ARGS__)
+#define svprfh_gather_u32index(...) __builtin_sve_svprfh_gather_u32index(__VA_ARGS__)
+#define svprfh_gather_s64index(...) __builtin_sve_svprfh_gather_s64index(__VA_ARGS__)
+#define svprfh_gather_u64index(...) __builtin_sve_svprfh_gather_u64index(__VA_ARGS__)
+#define svprfh_vnum(...) __builtin_sve_svprfh_vnum(__VA_ARGS__)
+#define svprfw(...) __builtin_sve_svprfw(__VA_ARGS__)
+#define svprfw_gather_u32base(...) __builtin_sve_svprfw_gather_u32base(__VA_ARGS__)
+#define svprfw_gather_u64base(...) __builtin_sve_svprfw_gather_u64base(__VA_ARGS__)
+#define svprfw_gather_u32base_index(...) __builtin_sve_svprfw_gather_u32base_index(__VA_ARGS__)
+#define svprfw_gather_u64base_index(...) __builtin_sve_svprfw_gather_u64base_index(__VA_ARGS__)
+#define svprfw_gather_s32index(...) __builtin_sve_svprfw_gather_s32index(__VA_ARGS__)
+#define svprfw_gather_u32index(...) __builtin_sve_svprfw_gather_u32index(__VA_ARGS__)
+#define svprfw_gather_s64index(...) __builtin_sve_svprfw_gather_s64index(__VA_ARGS__)
+#define svprfw_gather_u64index(...) __builtin_sve_svprfw_gather_u64index(__VA_ARGS__)
+#define svprfw_vnum(...) __builtin_sve_svprfw_vnum(__VA_ARGS__)
+#define svptest_any(...) __builtin_sve_svptest_any(__VA_ARGS__)
+#define svptest_first(...) __builtin_sve_svptest_first(__VA_ARGS__)
+#define svptest_last(...) __builtin_sve_svptest_last(__VA_ARGS__)
+#define svptrue_pat_b8(...) __builtin_sve_svptrue_pat_b8(__VA_ARGS__)
+#define svptrue_pat_b32(...) __builtin_sve_svptrue_pat_b32(__VA_ARGS__)
+#define svptrue_pat_b64(...) __builtin_sve_svptrue_pat_b64(__VA_ARGS__)
+#define svptrue_pat_b16(...) __builtin_sve_svptrue_pat_b16(__VA_ARGS__)
+#define svptrue_b8(...) __builtin_sve_svptrue_b8(__VA_ARGS__)
+#define svptrue_b32(...) __builtin_sve_svptrue_b32(__VA_ARGS__)
+#define svptrue_b64(...) __builtin_sve_svptrue_b64(__VA_ARGS__)
+#define svptrue_b16(...) __builtin_sve_svptrue_b16(__VA_ARGS__)
+#define svqadd_n_s8(...) __builtin_sve_svqadd_n_s8(__VA_ARGS__)
+#define svqadd_n_s32(...) __builtin_sve_svqadd_n_s32(__VA_ARGS__)
+#define svqadd_n_s64(...) __builtin_sve_svqadd_n_s64(__VA_ARGS__)
+#define svqadd_n_s16(...) __builtin_sve_svqadd_n_s16(__VA_ARGS__)
+#define svqadd_n_u8(...) __builtin_sve_svqadd_n_u8(__VA_ARGS__)
+#define svqadd_n_u32(...) __builtin_sve_svqadd_n_u32(__VA_ARGS__)
+#define svqadd_n_u64(...) __builtin_sve_svqadd_n_u64(__VA_ARGS__)
+#define svqadd_n_u16(...) __builtin_sve_svqadd_n_u16(__VA_ARGS__)
+#define svqadd_s8(...) __builtin_sve_svqadd_s8(__VA_ARGS__)
+#define svqadd_s32(...) __builtin_sve_svqadd_s32(__VA_ARGS__)
+#define svqadd_s64(...) __builtin_sve_svqadd_s64(__VA_ARGS__)
+#define svqadd_s16(...) __builtin_sve_svqadd_s16(__VA_ARGS__)
+#define svqadd_u8(...) __builtin_sve_svqadd_u8(__VA_ARGS__)
+#define svqadd_u32(...) __builtin_sve_svqadd_u32(__VA_ARGS__)
+#define svqadd_u64(...) __builtin_sve_svqadd_u64(__VA_ARGS__)
+#define svqadd_u16(...) __builtin_sve_svqadd_u16(__VA_ARGS__)
+#define svqdecb_n_s32(...) __builtin_sve_svqdecb_n_s32(__VA_ARGS__)
+#define svqdecb_n_s64(...) __builtin_sve_svqdecb_n_s64(__VA_ARGS__)
+#define svqdecb_n_u32(...) __builtin_sve_svqdecb_n_u32(__VA_ARGS__)
+#define svqdecb_n_u64(...) __builtin_sve_svqdecb_n_u64(__VA_ARGS__)
+#define svqdecb_pat_n_s32(...) __builtin_sve_svqdecb_pat_n_s32(__VA_ARGS__)
+#define svqdecb_pat_n_s64(...) __builtin_sve_svqdecb_pat_n_s64(__VA_ARGS__)
+#define svqdecb_pat_n_u32(...) __builtin_sve_svqdecb_pat_n_u32(__VA_ARGS__)
+#define svqdecb_pat_n_u64(...) __builtin_sve_svqdecb_pat_n_u64(__VA_ARGS__)
+#define svqdecd_n_s32(...) __builtin_sve_svqdecd_n_s32(__VA_ARGS__)
+#define svqdecd_n_s64(...) __builtin_sve_svqdecd_n_s64(__VA_ARGS__)
+#define svqdecd_n_u32(...) __builtin_sve_svqdecd_n_u32(__VA_ARGS__)
+#define svqdecd_n_u64(...) __builtin_sve_svqdecd_n_u64(__VA_ARGS__)
+#define svqdecd_s64(...) __builtin_sve_svqdecd_s64(__VA_ARGS__)
+#define svqdecd_u64(...) __builtin_sve_svqdecd_u64(__VA_ARGS__)
+#define svqdecd_pat_n_s32(...) __builtin_sve_svqdecd_pat_n_s32(__VA_ARGS__)
+#define svqdecd_pat_n_s64(...) __builtin_sve_svqdecd_pat_n_s64(__VA_ARGS__)
+#define svqdecd_pat_n_u32(...) __builtin_sve_svqdecd_pat_n_u32(__VA_ARGS__)
+#define svqdecd_pat_n_u64(...) __builtin_sve_svqdecd_pat_n_u64(__VA_ARGS__)
+#define svqdecd_pat_s64(...) __builtin_sve_svqdecd_pat_s64(__VA_ARGS__)
+#define svqdecd_pat_u64(...) __builtin_sve_svqdecd_pat_u64(__VA_ARGS__)
+#define svqdech_n_s32(...) __builtin_sve_svqdech_n_s32(__VA_ARGS__)
+#define svqdech_n_s64(...) __builtin_sve_svqdech_n_s64(__VA_ARGS__)
+#define svqdech_n_u32(...) __builtin_sve_svqdech_n_u32(__VA_ARGS__)
+#define svqdech_n_u64(...) __builtin_sve_svqdech_n_u64(__VA_ARGS__)
+#define svqdech_s16(...) __builtin_sve_svqdech_s16(__VA_ARGS__)
+#define svqdech_u16(...) __builtin_sve_svqdech_u16(__VA_ARGS__)
+#define svqdech_pat_n_s32(...) __builtin_sve_svqdech_pat_n_s32(__VA_ARGS__)
+#define svqdech_pat_n_s64(...) __builtin_sve_svqdech_pat_n_s64(__VA_ARGS__)
+#define svqdech_pat_n_u32(...) __builtin_sve_svqdech_pat_n_u32(__VA_ARGS__)
+#define svqdech_pat_n_u64(...) __builtin_sve_svqdech_pat_n_u64(__VA_ARGS__)
+#define svqdech_pat_s16(...) __builtin_sve_svqdech_pat_s16(__VA_ARGS__)
+#define svqdech_pat_u16(...) __builtin_sve_svqdech_pat_u16(__VA_ARGS__)
+#define svqdecp_n_s32_b8(...) __builtin_sve_svqdecp_n_s32_b8(__VA_ARGS__)
+#define svqdecp_n_s32_b32(...) __builtin_sve_svqdecp_n_s32_b32(__VA_ARGS__)
+#define svqdecp_n_s32_b64(...) __builtin_sve_svqdecp_n_s32_b64(__VA_ARGS__)
+#define svqdecp_n_s32_b16(...) __builtin_sve_svqdecp_n_s32_b16(__VA_ARGS__)
+#define svqdecp_n_s64_b8(...) __builtin_sve_svqdecp_n_s64_b8(__VA_ARGS__)
+#define svqdecp_n_s64_b32(...) __builtin_sve_svqdecp_n_s64_b32(__VA_ARGS__)
+#define svqdecp_n_s64_b64(...) __builtin_sve_svqdecp_n_s64_b64(__VA_ARGS__)
+#define svqdecp_n_s64_b16(...) __builtin_sve_svqdecp_n_s64_b16(__VA_ARGS__)
+#define svqdecp_n_u32_b8(...) __builtin_sve_svqdecp_n_u32_b8(__VA_ARGS__)
+#define svqdecp_n_u32_b32(...) __builtin_sve_svqdecp_n_u32_b32(__VA_ARGS__)
+#define svqdecp_n_u32_b64(...) __builtin_sve_svqdecp_n_u32_b64(__VA_ARGS__)
+#define svqdecp_n_u32_b16(...) __builtin_sve_svqdecp_n_u32_b16(__VA_ARGS__)
+#define svqdecp_n_u64_b8(...) __builtin_sve_svqdecp_n_u64_b8(__VA_ARGS__)
+#define svqdecp_n_u64_b32(...) __builtin_sve_svqdecp_n_u64_b32(__VA_ARGS__)
+#define svqdecp_n_u64_b64(...) __builtin_sve_svqdecp_n_u64_b64(__VA_ARGS__)
+#define svqdecp_n_u64_b16(...) __builtin_sve_svqdecp_n_u64_b16(__VA_ARGS__)
+#define svqdecp_s32(...) __builtin_sve_svqdecp_s32(__VA_ARGS__)
+#define svqdecp_s64(...) __builtin_sve_svqdecp_s64(__VA_ARGS__)
+#define svqdecp_s16(...) __builtin_sve_svqdecp_s16(__VA_ARGS__)
+#define svqdecp_u32(...) __builtin_sve_svqdecp_u32(__VA_ARGS__)
+#define svqdecp_u64(...) __builtin_sve_svqdecp_u64(__VA_ARGS__)
+#define svqdecp_u16(...) __builtin_sve_svqdecp_u16(__VA_ARGS__)
+#define svqdecw_n_s32(...) __builtin_sve_svqdecw_n_s32(__VA_ARGS__)
+#define svqdecw_n_s64(...) __builtin_sve_svqdecw_n_s64(__VA_ARGS__)
+#define svqdecw_n_u32(...) __builtin_sve_svqdecw_n_u32(__VA_ARGS__)
+#define svqdecw_n_u64(...) __builtin_sve_svqdecw_n_u64(__VA_ARGS__)
+#define svqdecw_s32(...) __builtin_sve_svqdecw_s32(__VA_ARGS__)
+#define svqdecw_u32(...) __builtin_sve_svqdecw_u32(__VA_ARGS__)
+#define svqdecw_pat_n_s32(...) __builtin_sve_svqdecw_pat_n_s32(__VA_ARGS__)
+#define svqdecw_pat_n_s64(...) __builtin_sve_svqdecw_pat_n_s64(__VA_ARGS__)
+#define svqdecw_pat_n_u32(...) __builtin_sve_svqdecw_pat_n_u32(__VA_ARGS__)
+#define svqdecw_pat_n_u64(...) __builtin_sve_svqdecw_pat_n_u64(__VA_ARGS__)
+#define svqdecw_pat_s32(...) __builtin_sve_svqdecw_pat_s32(__VA_ARGS__)
+#define svqdecw_pat_u32(...) __builtin_sve_svqdecw_pat_u32(__VA_ARGS__)
+#define svqincb_n_s32(...) __builtin_sve_svqincb_n_s32(__VA_ARGS__)
+#define svqincb_n_s64(...) __builtin_sve_svqincb_n_s64(__VA_ARGS__)
+#define svqincb_n_u32(...) __builtin_sve_svqincb_n_u32(__VA_ARGS__)
+#define svqincb_n_u64(...) __builtin_sve_svqincb_n_u64(__VA_ARGS__)
+#define svqincb_pat_n_s32(...) __builtin_sve_svqincb_pat_n_s32(__VA_ARGS__)
+#define svqincb_pat_n_s64(...) __builtin_sve_svqincb_pat_n_s64(__VA_ARGS__)
+#define svqincb_pat_n_u32(...) __builtin_sve_svqincb_pat_n_u32(__VA_ARGS__)
+#define svqincb_pat_n_u64(...) __builtin_sve_svqincb_pat_n_u64(__VA_ARGS__)
+#define svqincd_n_s32(...) __builtin_sve_svqincd_n_s32(__VA_ARGS__)
+#define svqincd_n_s64(...) __builtin_sve_svqincd_n_s64(__VA_ARGS__)
+#define svqincd_n_u32(...) __builtin_sve_svqincd_n_u32(__VA_ARGS__)
+#define svqincd_n_u64(...) __builtin_sve_svqincd_n_u64(__VA_ARGS__)
+#define svqincd_s64(...) __builtin_sve_svqincd_s64(__VA_ARGS__)
+#define svqincd_u64(...) __builtin_sve_svqincd_u64(__VA_ARGS__)
+#define svqincd_pat_n_s32(...) __builtin_sve_svqincd_pat_n_s32(__VA_ARGS__)
+#define svqincd_pat_n_s64(...) __builtin_sve_svqincd_pat_n_s64(__VA_ARGS__)
+#define svqincd_pat_n_u32(...) __builtin_sve_svqincd_pat_n_u32(__VA_ARGS__)
+#define svqincd_pat_n_u64(...) __builtin_sve_svqincd_pat_n_u64(__VA_ARGS__)
+#define svqincd_pat_s64(...) __builtin_sve_svqincd_pat_s64(__VA_ARGS__)
+#define svqincd_pat_u64(...) __builtin_sve_svqincd_pat_u64(__VA_ARGS__)
+#define svqinch_n_s32(...) __builtin_sve_svqinch_n_s32(__VA_ARGS__)
+#define svqinch_n_s64(...) __builtin_sve_svqinch_n_s64(__VA_ARGS__)
+#define svqinch_n_u32(...) __builtin_sve_svqinch_n_u32(__VA_ARGS__)
+#define svqinch_n_u64(...) __builtin_sve_svqinch_n_u64(__VA_ARGS__)
+#define svqinch_s16(...) __builtin_sve_svqinch_s16(__VA_ARGS__)
+#define svqinch_u16(...) __builtin_sve_svqinch_u16(__VA_ARGS__)
+#define svqinch_pat_n_s32(...) __builtin_sve_svqinch_pat_n_s32(__VA_ARGS__)
+#define svqinch_pat_n_s64(...) __builtin_sve_svqinch_pat_n_s64(__VA_ARGS__)
+#define svqinch_pat_n_u32(...) __builtin_sve_svqinch_pat_n_u32(__VA_ARGS__)
+#define svqinch_pat_n_u64(...) __builtin_sve_svqinch_pat_n_u64(__VA_ARGS__)
+#define svqinch_pat_s16(...) __builtin_sve_svqinch_pat_s16(__VA_ARGS__)
+#define svqinch_pat_u16(...) __builtin_sve_svqinch_pat_u16(__VA_ARGS__)
+#define svqincp_n_s32_b8(...) __builtin_sve_svqincp_n_s32_b8(__VA_ARGS__)
+#define svqincp_n_s32_b32(...) __builtin_sve_svqincp_n_s32_b32(__VA_ARGS__)
+#define svqincp_n_s32_b64(...) __builtin_sve_svqincp_n_s32_b64(__VA_ARGS__)
+#define svqincp_n_s32_b16(...) __builtin_sve_svqincp_n_s32_b16(__VA_ARGS__)
+#define svqincp_n_s64_b8(...) __builtin_sve_svqincp_n_s64_b8(__VA_ARGS__)
+#define svqincp_n_s64_b32(...) __builtin_sve_svqincp_n_s64_b32(__VA_ARGS__)
+#define svqincp_n_s64_b64(...) __builtin_sve_svqincp_n_s64_b64(__VA_ARGS__)
+#define svqincp_n_s64_b16(...) __builtin_sve_svqincp_n_s64_b16(__VA_ARGS__)
+#define svqincp_n_u32_b8(...) __builtin_sve_svqincp_n_u32_b8(__VA_ARGS__)
+#define svqincp_n_u32_b32(...) __builtin_sve_svqincp_n_u32_b32(__VA_ARGS__)
+#define svqincp_n_u32_b64(...) __builtin_sve_svqincp_n_u32_b64(__VA_ARGS__)
+#define svqincp_n_u32_b16(...) __builtin_sve_svqincp_n_u32_b16(__VA_ARGS__)
+#define svqincp_n_u64_b8(...) __builtin_sve_svqincp_n_u64_b8(__VA_ARGS__)
+#define svqincp_n_u64_b32(...) __builtin_sve_svqincp_n_u64_b32(__VA_ARGS__)
+#define svqincp_n_u64_b64(...) __builtin_sve_svqincp_n_u64_b64(__VA_ARGS__)
+#define svqincp_n_u64_b16(...) __builtin_sve_svqincp_n_u64_b16(__VA_ARGS__)
+#define svqincp_s32(...) __builtin_sve_svqincp_s32(__VA_ARGS__)
+#define svqincp_s64(...) __builtin_sve_svqincp_s64(__VA_ARGS__)
+#define svqincp_s16(...) __builtin_sve_svqincp_s16(__VA_ARGS__)
+#define svqincp_u32(...) __builtin_sve_svqincp_u32(__VA_ARGS__)
+#define svqincp_u64(...) __builtin_sve_svqincp_u64(__VA_ARGS__)
+#define svqincp_u16(...) __builtin_sve_svqincp_u16(__VA_ARGS__)
+#define svqincw_n_s32(...) __builtin_sve_svqincw_n_s32(__VA_ARGS__)
+#define svqincw_n_s64(...) __builtin_sve_svqincw_n_s64(__VA_ARGS__)
+#define svqincw_n_u32(...) __builtin_sve_svqincw_n_u32(__VA_ARGS__)
+#define svqincw_n_u64(...) __builtin_sve_svqincw_n_u64(__VA_ARGS__)
+#define svqincw_s32(...) __builtin_sve_svqincw_s32(__VA_ARGS__)
+#define svqincw_u32(...) __builtin_sve_svqincw_u32(__VA_ARGS__)
+#define svqincw_pat_n_s32(...) __builtin_sve_svqincw_pat_n_s32(__VA_ARGS__)
+#define svqincw_pat_n_s64(...) __builtin_sve_svqincw_pat_n_s64(__VA_ARGS__)
+#define svqincw_pat_n_u32(...) __builtin_sve_svqincw_pat_n_u32(__VA_ARGS__)
+#define svqincw_pat_n_u64(...) __builtin_sve_svqincw_pat_n_u64(__VA_ARGS__)
+#define svqincw_pat_s32(...) __builtin_sve_svqincw_pat_s32(__VA_ARGS__)
+#define svqincw_pat_u32(...) __builtin_sve_svqincw_pat_u32(__VA_ARGS__)
+#define svqsub_n_s8(...) __builtin_sve_svqsub_n_s8(__VA_ARGS__)
+#define svqsub_n_s32(...) __builtin_sve_svqsub_n_s32(__VA_ARGS__)
+#define svqsub_n_s64(...) __builtin_sve_svqsub_n_s64(__VA_ARGS__)
+#define svqsub_n_s16(...) __builtin_sve_svqsub_n_s16(__VA_ARGS__)
+#define svqsub_n_u8(...) __builtin_sve_svqsub_n_u8(__VA_ARGS__)
+#define svqsub_n_u32(...) __builtin_sve_svqsub_n_u32(__VA_ARGS__)
+#define svqsub_n_u64(...) __builtin_sve_svqsub_n_u64(__VA_ARGS__)
+#define svqsub_n_u16(...) __builtin_sve_svqsub_n_u16(__VA_ARGS__)
+#define svqsub_s8(...) __builtin_sve_svqsub_s8(__VA_ARGS__)
+#define svqsub_s32(...) __builtin_sve_svqsub_s32(__VA_ARGS__)
+#define svqsub_s64(...) __builtin_sve_svqsub_s64(__VA_ARGS__)
+#define svqsub_s16(...) __builtin_sve_svqsub_s16(__VA_ARGS__)
+#define svqsub_u8(...) __builtin_sve_svqsub_u8(__VA_ARGS__)
+#define svqsub_u32(...) __builtin_sve_svqsub_u32(__VA_ARGS__)
+#define svqsub_u64(...) __builtin_sve_svqsub_u64(__VA_ARGS__)
+#define svqsub_u16(...) __builtin_sve_svqsub_u16(__VA_ARGS__)
+#define svrbit_u8_m(...) __builtin_sve_svrbit_u8_m(__VA_ARGS__)
+#define svrbit_u32_m(...) __builtin_sve_svrbit_u32_m(__VA_ARGS__)
+#define svrbit_u64_m(...) __builtin_sve_svrbit_u64_m(__VA_ARGS__)
+#define svrbit_u16_m(...) __builtin_sve_svrbit_u16_m(__VA_ARGS__)
+#define svrbit_s8_m(...) __builtin_sve_svrbit_s8_m(__VA_ARGS__)
+#define svrbit_s32_m(...) __builtin_sve_svrbit_s32_m(__VA_ARGS__)
+#define svrbit_s64_m(...) __builtin_sve_svrbit_s64_m(__VA_ARGS__)
+#define svrbit_s16_m(...) __builtin_sve_svrbit_s16_m(__VA_ARGS__)
+#define svrbit_u8_x(...) __builtin_sve_svrbit_u8_x(__VA_ARGS__)
+#define svrbit_u32_x(...) __builtin_sve_svrbit_u32_x(__VA_ARGS__)
+#define svrbit_u64_x(...) __builtin_sve_svrbit_u64_x(__VA_ARGS__)
+#define svrbit_u16_x(...) __builtin_sve_svrbit_u16_x(__VA_ARGS__)
+#define svrbit_s8_x(...) __builtin_sve_svrbit_s8_x(__VA_ARGS__)
+#define svrbit_s32_x(...) __builtin_sve_svrbit_s32_x(__VA_ARGS__)
+#define svrbit_s64_x(...) __builtin_sve_svrbit_s64_x(__VA_ARGS__)
+#define svrbit_s16_x(...) __builtin_sve_svrbit_s16_x(__VA_ARGS__)
+#define svrbit_u8_z(...) __builtin_sve_svrbit_u8_z(__VA_ARGS__)
+#define svrbit_u32_z(...) __builtin_sve_svrbit_u32_z(__VA_ARGS__)
+#define svrbit_u64_z(...) __builtin_sve_svrbit_u64_z(__VA_ARGS__)
+#define svrbit_u16_z(...) __builtin_sve_svrbit_u16_z(__VA_ARGS__)
+#define svrbit_s8_z(...) __builtin_sve_svrbit_s8_z(__VA_ARGS__)
+#define svrbit_s32_z(...) __builtin_sve_svrbit_s32_z(__VA_ARGS__)
+#define svrbit_s64_z(...) __builtin_sve_svrbit_s64_z(__VA_ARGS__)
+#define svrbit_s16_z(...) __builtin_sve_svrbit_s16_z(__VA_ARGS__)
+#define svrdffr(...) __builtin_sve_svrdffr(__VA_ARGS__)
+#define svrdffr_z(...) __builtin_sve_svrdffr_z(__VA_ARGS__)
+#define svrecpe_f64(...) __builtin_sve_svrecpe_f64(__VA_ARGS__)
+#define svrecpe_f32(...) __builtin_sve_svrecpe_f32(__VA_ARGS__)
+#define svrecpe_f16(...) __builtin_sve_svrecpe_f16(__VA_ARGS__)
+#define svrecps_f64(...) __builtin_sve_svrecps_f64(__VA_ARGS__)
+#define svrecps_f32(...) __builtin_sve_svrecps_f32(__VA_ARGS__)
+#define svrecps_f16(...) __builtin_sve_svrecps_f16(__VA_ARGS__)
+#define svrecpx_f64_m(...) __builtin_sve_svrecpx_f64_m(__VA_ARGS__)
+#define svrecpx_f32_m(...) __builtin_sve_svrecpx_f32_m(__VA_ARGS__)
+#define svrecpx_f16_m(...) __builtin_sve_svrecpx_f16_m(__VA_ARGS__)
+#define svrecpx_f64_x(...) __builtin_sve_svrecpx_f64_x(__VA_ARGS__)
+#define svrecpx_f32_x(...) __builtin_sve_svrecpx_f32_x(__VA_ARGS__)
+#define svrecpx_f16_x(...) __builtin_sve_svrecpx_f16_x(__VA_ARGS__)
+#define svrecpx_f64_z(...) __builtin_sve_svrecpx_f64_z(__VA_ARGS__)
+#define svrecpx_f32_z(...) __builtin_sve_svrecpx_f32_z(__VA_ARGS__)
+#define svrecpx_f16_z(...) __builtin_sve_svrecpx_f16_z(__VA_ARGS__)
+#define svrev_u8(...) __builtin_sve_svrev_u8(__VA_ARGS__)
+#define svrev_u32(...) __builtin_sve_svrev_u32(__VA_ARGS__)
+#define svrev_u64(...) __builtin_sve_svrev_u64(__VA_ARGS__)
+#define svrev_u16(...) __builtin_sve_svrev_u16(__VA_ARGS__)
+#define svrev_s8(...) __builtin_sve_svrev_s8(__VA_ARGS__)
+#define svrev_f64(...) __builtin_sve_svrev_f64(__VA_ARGS__)
+#define svrev_f32(...) __builtin_sve_svrev_f32(__VA_ARGS__)
+#define svrev_f16(...) __builtin_sve_svrev_f16(__VA_ARGS__)
+#define svrev_s32(...) __builtin_sve_svrev_s32(__VA_ARGS__)
+#define svrev_s64(...) __builtin_sve_svrev_s64(__VA_ARGS__)
+#define svrev_s16(...) __builtin_sve_svrev_s16(__VA_ARGS__)
+#define svrev_b8(...) __builtin_sve_svrev_b8(__VA_ARGS__)
+#define svrev_b32(...) __builtin_sve_svrev_b32(__VA_ARGS__)
+#define svrev_b64(...) __builtin_sve_svrev_b64(__VA_ARGS__)
+#define svrev_b16(...) __builtin_sve_svrev_b16(__VA_ARGS__)
+#define svrevb_u32_m(...) __builtin_sve_svrevb_u32_m(__VA_ARGS__)
+#define svrevb_u64_m(...) __builtin_sve_svrevb_u64_m(__VA_ARGS__)
+#define svrevb_u16_m(...) __builtin_sve_svrevb_u16_m(__VA_ARGS__)
+#define svrevb_s32_m(...) __builtin_sve_svrevb_s32_m(__VA_ARGS__)
+#define svrevb_s64_m(...) __builtin_sve_svrevb_s64_m(__VA_ARGS__)
+#define svrevb_s16_m(...) __builtin_sve_svrevb_s16_m(__VA_ARGS__)
+#define svrevb_u32_x(...) __builtin_sve_svrevb_u32_x(__VA_ARGS__)
+#define svrevb_u64_x(...) __builtin_sve_svrevb_u64_x(__VA_ARGS__)
+#define svrevb_u16_x(...) __builtin_sve_svrevb_u16_x(__VA_ARGS__)
+#define svrevb_s32_x(...) __builtin_sve_svrevb_s32_x(__VA_ARGS__)
+#define svrevb_s64_x(...) __builtin_sve_svrevb_s64_x(__VA_ARGS__)
+#define svrevb_s16_x(...) __builtin_sve_svrevb_s16_x(__VA_ARGS__)
+#define svrevb_u32_z(...) __builtin_sve_svrevb_u32_z(__VA_ARGS__)
+#define svrevb_u64_z(...) __builtin_sve_svrevb_u64_z(__VA_ARGS__)
+#define svrevb_u16_z(...) __builtin_sve_svrevb_u16_z(__VA_ARGS__)
+#define svrevb_s32_z(...) __builtin_sve_svrevb_s32_z(__VA_ARGS__)
+#define svrevb_s64_z(...) __builtin_sve_svrevb_s64_z(__VA_ARGS__)
+#define svrevb_s16_z(...) __builtin_sve_svrevb_s16_z(__VA_ARGS__)
+#define svrevh_u32_m(...) __builtin_sve_svrevh_u32_m(__VA_ARGS__)
+#define svrevh_u64_m(...) __builtin_sve_svrevh_u64_m(__VA_ARGS__)
+#define svrevh_s32_m(...) __builtin_sve_svrevh_s32_m(__VA_ARGS__)
+#define svrevh_s64_m(...) __builtin_sve_svrevh_s64_m(__VA_ARGS__)
+#define svrevh_u32_x(...) __builtin_sve_svrevh_u32_x(__VA_ARGS__)
+#define svrevh_u64_x(...) __builtin_sve_svrevh_u64_x(__VA_ARGS__)
+#define svrevh_s32_x(...) __builtin_sve_svrevh_s32_x(__VA_ARGS__)
+#define svrevh_s64_x(...) __builtin_sve_svrevh_s64_x(__VA_ARGS__)
+#define svrevh_u32_z(...) __builtin_sve_svrevh_u32_z(__VA_ARGS__)
+#define svrevh_u64_z(...) __builtin_sve_svrevh_u64_z(__VA_ARGS__)
+#define svrevh_s32_z(...) __builtin_sve_svrevh_s32_z(__VA_ARGS__)
+#define svrevh_s64_z(...) __builtin_sve_svrevh_s64_z(__VA_ARGS__)
+#define svrevw_u64_m(...) __builtin_sve_svrevw_u64_m(__VA_ARGS__)
+#define svrevw_s64_m(...) __builtin_sve_svrevw_s64_m(__VA_ARGS__)
+#define svrevw_u64_x(...) __builtin_sve_svrevw_u64_x(__VA_ARGS__)
+#define svrevw_s64_x(...) __builtin_sve_svrevw_s64_x(__VA_ARGS__)
+#define svrevw_u64_z(...) __builtin_sve_svrevw_u64_z(__VA_ARGS__)
+#define svrevw_s64_z(...) __builtin_sve_svrevw_s64_z(__VA_ARGS__)
+#define svrinta_f64_m(...) __builtin_sve_svrinta_f64_m(__VA_ARGS__)
+#define svrinta_f32_m(...) __builtin_sve_svrinta_f32_m(__VA_ARGS__)
+#define svrinta_f16_m(...) __builtin_sve_svrinta_f16_m(__VA_ARGS__)
+#define svrinta_f64_x(...) __builtin_sve_svrinta_f64_x(__VA_ARGS__)
+#define svrinta_f32_x(...) __builtin_sve_svrinta_f32_x(__VA_ARGS__)
+#define svrinta_f16_x(...) __builtin_sve_svrinta_f16_x(__VA_ARGS__)
+#define svrinta_f64_z(...) __builtin_sve_svrinta_f64_z(__VA_ARGS__)
+#define svrinta_f32_z(...) __builtin_sve_svrinta_f32_z(__VA_ARGS__)
+#define svrinta_f16_z(...) __builtin_sve_svrinta_f16_z(__VA_ARGS__)
+#define svrinti_f64_m(...) __builtin_sve_svrinti_f64_m(__VA_ARGS__)
+#define svrinti_f32_m(...) __builtin_sve_svrinti_f32_m(__VA_ARGS__)
+#define svrinti_f16_m(...) __builtin_sve_svrinti_f16_m(__VA_ARGS__)
+#define svrinti_f64_x(...) __builtin_sve_svrinti_f64_x(__VA_ARGS__)
+#define svrinti_f32_x(...) __builtin_sve_svrinti_f32_x(__VA_ARGS__)
+#define svrinti_f16_x(...) __builtin_sve_svrinti_f16_x(__VA_ARGS__)
+#define svrinti_f64_z(...) __builtin_sve_svrinti_f64_z(__VA_ARGS__)
+#define svrinti_f32_z(...) __builtin_sve_svrinti_f32_z(__VA_ARGS__)
+#define svrinti_f16_z(...) __builtin_sve_svrinti_f16_z(__VA_ARGS__)
+#define svrintm_f64_m(...) __builtin_sve_svrintm_f64_m(__VA_ARGS__)
+#define svrintm_f32_m(...) __builtin_sve_svrintm_f32_m(__VA_ARGS__)
+#define svrintm_f16_m(...) __builtin_sve_svrintm_f16_m(__VA_ARGS__)
+#define svrintm_f64_x(...) __builtin_sve_svrintm_f64_x(__VA_ARGS__)
+#define svrintm_f32_x(...) __builtin_sve_svrintm_f32_x(__VA_ARGS__)
+#define svrintm_f16_x(...) __builtin_sve_svrintm_f16_x(__VA_ARGS__)
+#define svrintm_f64_z(...) __builtin_sve_svrintm_f64_z(__VA_ARGS__)
+#define svrintm_f32_z(...) __builtin_sve_svrintm_f32_z(__VA_ARGS__)
+#define svrintm_f16_z(...) __builtin_sve_svrintm_f16_z(__VA_ARGS__)
+#define svrintn_f64_m(...) __builtin_sve_svrintn_f64_m(__VA_ARGS__)
+#define svrintn_f32_m(...) __builtin_sve_svrintn_f32_m(__VA_ARGS__)
+#define svrintn_f16_m(...) __builtin_sve_svrintn_f16_m(__VA_ARGS__)
+#define svrintn_f64_x(...) __builtin_sve_svrintn_f64_x(__VA_ARGS__)
+#define svrintn_f32_x(...) __builtin_sve_svrintn_f32_x(__VA_ARGS__)
+#define svrintn_f16_x(...) __builtin_sve_svrintn_f16_x(__VA_ARGS__)
+#define svrintn_f64_z(...) __builtin_sve_svrintn_f64_z(__VA_ARGS__)
+#define svrintn_f32_z(...) __builtin_sve_svrintn_f32_z(__VA_ARGS__)
+#define svrintn_f16_z(...) __builtin_sve_svrintn_f16_z(__VA_ARGS__)
+#define svrintp_f64_m(...) __builtin_sve_svrintp_f64_m(__VA_ARGS__)
+#define svrintp_f32_m(...) __builtin_sve_svrintp_f32_m(__VA_ARGS__)
+#define svrintp_f16_m(...) __builtin_sve_svrintp_f16_m(__VA_ARGS__)
+#define svrintp_f64_x(...) __builtin_sve_svrintp_f64_x(__VA_ARGS__)
+#define svrintp_f32_x(...) __builtin_sve_svrintp_f32_x(__VA_ARGS__)
+#define svrintp_f16_x(...) __builtin_sve_svrintp_f16_x(__VA_ARGS__)
+#define svrintp_f64_z(...) __builtin_sve_svrintp_f64_z(__VA_ARGS__)
+#define svrintp_f32_z(...) __builtin_sve_svrintp_f32_z(__VA_ARGS__)
+#define svrintp_f16_z(...) __builtin_sve_svrintp_f16_z(__VA_ARGS__)
+#define svrintx_f64_m(...) __builtin_sve_svrintx_f64_m(__VA_ARGS__)
+#define svrintx_f32_m(...) __builtin_sve_svrintx_f32_m(__VA_ARGS__)
+#define svrintx_f16_m(...) __builtin_sve_svrintx_f16_m(__VA_ARGS__)
+#define svrintx_f64_x(...) __builtin_sve_svrintx_f64_x(__VA_ARGS__)
+#define svrintx_f32_x(...) __builtin_sve_svrintx_f32_x(__VA_ARGS__)
+#define svrintx_f16_x(...) __builtin_sve_svrintx_f16_x(__VA_ARGS__)
+#define svrintx_f64_z(...) __builtin_sve_svrintx_f64_z(__VA_ARGS__)
+#define svrintx_f32_z(...) __builtin_sve_svrintx_f32_z(__VA_ARGS__)
+#define svrintx_f16_z(...) __builtin_sve_svrintx_f16_z(__VA_ARGS__)
+#define svrintz_f64_m(...) __builtin_sve_svrintz_f64_m(__VA_ARGS__)
+#define svrintz_f32_m(...) __builtin_sve_svrintz_f32_m(__VA_ARGS__)
+#define svrintz_f16_m(...) __builtin_sve_svrintz_f16_m(__VA_ARGS__)
+#define svrintz_f64_x(...) __builtin_sve_svrintz_f64_x(__VA_ARGS__)
+#define svrintz_f32_x(...) __builtin_sve_svrintz_f32_x(__VA_ARGS__)
+#define svrintz_f16_x(...) __builtin_sve_svrintz_f16_x(__VA_ARGS__)
+#define svrintz_f64_z(...) __builtin_sve_svrintz_f64_z(__VA_ARGS__)
+#define svrintz_f32_z(...) __builtin_sve_svrintz_f32_z(__VA_ARGS__)
+#define svrintz_f16_z(...) __builtin_sve_svrintz_f16_z(__VA_ARGS__)
+#define svrsqrte_f64(...) __builtin_sve_svrsqrte_f64(__VA_ARGS__)
+#define svrsqrte_f32(...) __builtin_sve_svrsqrte_f32(__VA_ARGS__)
+#define svrsqrte_f16(...) __builtin_sve_svrsqrte_f16(__VA_ARGS__)
+#define svrsqrts_f64(...) __builtin_sve_svrsqrts_f64(__VA_ARGS__)
+#define svrsqrts_f32(...) __builtin_sve_svrsqrts_f32(__VA_ARGS__)
+#define svrsqrts_f16(...) __builtin_sve_svrsqrts_f16(__VA_ARGS__)
+#define svscale_n_f64_m(...) __builtin_sve_svscale_n_f64_m(__VA_ARGS__)
+#define svscale_n_f32_m(...) __builtin_sve_svscale_n_f32_m(__VA_ARGS__)
+#define svscale_n_f16_m(...) __builtin_sve_svscale_n_f16_m(__VA_ARGS__)
+#define svscale_n_f64_x(...) __builtin_sve_svscale_n_f64_x(__VA_ARGS__)
+#define svscale_n_f32_x(...) __builtin_sve_svscale_n_f32_x(__VA_ARGS__)
+#define svscale_n_f16_x(...) __builtin_sve_svscale_n_f16_x(__VA_ARGS__)
+#define svscale_n_f64_z(...) __builtin_sve_svscale_n_f64_z(__VA_ARGS__)
+#define svscale_n_f32_z(...) __builtin_sve_svscale_n_f32_z(__VA_ARGS__)
+#define svscale_n_f16_z(...) __builtin_sve_svscale_n_f16_z(__VA_ARGS__)
+#define svscale_f64_m(...) __builtin_sve_svscale_f64_m(__VA_ARGS__)
+#define svscale_f32_m(...) __builtin_sve_svscale_f32_m(__VA_ARGS__)
+#define svscale_f16_m(...) __builtin_sve_svscale_f16_m(__VA_ARGS__)
+#define svscale_f64_x(...) __builtin_sve_svscale_f64_x(__VA_ARGS__)
+#define svscale_f32_x(...) __builtin_sve_svscale_f32_x(__VA_ARGS__)
+#define svscale_f16_x(...) __builtin_sve_svscale_f16_x(__VA_ARGS__)
+#define svscale_f64_z(...) __builtin_sve_svscale_f64_z(__VA_ARGS__)
+#define svscale_f32_z(...) __builtin_sve_svscale_f32_z(__VA_ARGS__)
+#define svscale_f16_z(...) __builtin_sve_svscale_f16_z(__VA_ARGS__)
+#define svsel_b(...) __builtin_sve_svsel_b(__VA_ARGS__)
+#define svsel_u8(...) __builtin_sve_svsel_u8(__VA_ARGS__)
+#define svsel_u32(...) __builtin_sve_svsel_u32(__VA_ARGS__)
+#define svsel_u64(...) __builtin_sve_svsel_u64(__VA_ARGS__)
+#define svsel_u16(...) __builtin_sve_svsel_u16(__VA_ARGS__)
+#define svsel_s8(...) __builtin_sve_svsel_s8(__VA_ARGS__)
+#define svsel_f64(...) __builtin_sve_svsel_f64(__VA_ARGS__)
+#define svsel_f32(...) __builtin_sve_svsel_f32(__VA_ARGS__)
+#define svsel_f16(...) __builtin_sve_svsel_f16(__VA_ARGS__)
+#define svsel_s32(...) __builtin_sve_svsel_s32(__VA_ARGS__)
+#define svsel_s64(...) __builtin_sve_svsel_s64(__VA_ARGS__)
+#define svsel_s16(...) __builtin_sve_svsel_s16(__VA_ARGS__)
+#define svset2_u8(...) __builtin_sve_svset2_u8(__VA_ARGS__)
+#define svset2_u32(...) __builtin_sve_svset2_u32(__VA_ARGS__)
+#define svset2_u64(...) __builtin_sve_svset2_u64(__VA_ARGS__)
+#define svset2_u16(...) __builtin_sve_svset2_u16(__VA_ARGS__)
+#define svset2_s8(...) __builtin_sve_svset2_s8(__VA_ARGS__)
+#define svset2_f64(...) __builtin_sve_svset2_f64(__VA_ARGS__)
+#define svset2_f32(...) __builtin_sve_svset2_f32(__VA_ARGS__)
+#define svset2_f16(...) __builtin_sve_svset2_f16(__VA_ARGS__)
+#define svset2_s32(...) __builtin_sve_svset2_s32(__VA_ARGS__)
+#define svset2_s64(...) __builtin_sve_svset2_s64(__VA_ARGS__)
+#define svset2_s16(...) __builtin_sve_svset2_s16(__VA_ARGS__)
+#define svset3_u8(...) __builtin_sve_svset3_u8(__VA_ARGS__)
+#define svset3_u32(...) __builtin_sve_svset3_u32(__VA_ARGS__)
+#define svset3_u64(...) __builtin_sve_svset3_u64(__VA_ARGS__)
+#define svset3_u16(...) __builtin_sve_svset3_u16(__VA_ARGS__)
+#define svset3_s8(...) __builtin_sve_svset3_s8(__VA_ARGS__)
+#define svset3_f64(...) __builtin_sve_svset3_f64(__VA_ARGS__)
+#define svset3_f32(...) __builtin_sve_svset3_f32(__VA_ARGS__)
+#define svset3_f16(...) __builtin_sve_svset3_f16(__VA_ARGS__)
+#define svset3_s32(...) __builtin_sve_svset3_s32(__VA_ARGS__)
+#define svset3_s64(...) __builtin_sve_svset3_s64(__VA_ARGS__)
+#define svset3_s16(...) __builtin_sve_svset3_s16(__VA_ARGS__)
+#define svset4_u8(...) __builtin_sve_svset4_u8(__VA_ARGS__)
+#define svset4_u32(...) __builtin_sve_svset4_u32(__VA_ARGS__)
+#define svset4_u64(...) __builtin_sve_svset4_u64(__VA_ARGS__)
+#define svset4_u16(...) __builtin_sve_svset4_u16(__VA_ARGS__)
+#define svset4_s8(...) __builtin_sve_svset4_s8(__VA_ARGS__)
+#define svset4_f64(...) __builtin_sve_svset4_f64(__VA_ARGS__)
+#define svset4_f32(...) __builtin_sve_svset4_f32(__VA_ARGS__)
+#define svset4_f16(...) __builtin_sve_svset4_f16(__VA_ARGS__)
+#define svset4_s32(...) __builtin_sve_svset4_s32(__VA_ARGS__)
+#define svset4_s64(...) __builtin_sve_svset4_s64(__VA_ARGS__)
+#define svset4_s16(...) __builtin_sve_svset4_s16(__VA_ARGS__)
+#define svsetffr(...) __builtin_sve_svsetffr(__VA_ARGS__)
+#define svsplice_u8(...) __builtin_sve_svsplice_u8(__VA_ARGS__)
+#define svsplice_u32(...) __builtin_sve_svsplice_u32(__VA_ARGS__)
+#define svsplice_u64(...) __builtin_sve_svsplice_u64(__VA_ARGS__)
+#define svsplice_u16(...) __builtin_sve_svsplice_u16(__VA_ARGS__)
+#define svsplice_s8(...) __builtin_sve_svsplice_s8(__VA_ARGS__)
+#define svsplice_f64(...) __builtin_sve_svsplice_f64(__VA_ARGS__)
+#define svsplice_f32(...) __builtin_sve_svsplice_f32(__VA_ARGS__)
+#define svsplice_f16(...) __builtin_sve_svsplice_f16(__VA_ARGS__)
+#define svsplice_s32(...) __builtin_sve_svsplice_s32(__VA_ARGS__)
+#define svsplice_s64(...) __builtin_sve_svsplice_s64(__VA_ARGS__)
+#define svsplice_s16(...) __builtin_sve_svsplice_s16(__VA_ARGS__)
+#define svsqrt_f64_m(...) __builtin_sve_svsqrt_f64_m(__VA_ARGS__)
+#define svsqrt_f32_m(...) __builtin_sve_svsqrt_f32_m(__VA_ARGS__)
+#define svsqrt_f16_m(...) __builtin_sve_svsqrt_f16_m(__VA_ARGS__)
+#define svsqrt_f64_x(...) __builtin_sve_svsqrt_f64_x(__VA_ARGS__)
+#define svsqrt_f32_x(...) __builtin_sve_svsqrt_f32_x(__VA_ARGS__)
+#define svsqrt_f16_x(...) __builtin_sve_svsqrt_f16_x(__VA_ARGS__)
+#define svsqrt_f64_z(...) __builtin_sve_svsqrt_f64_z(__VA_ARGS__)
+#define svsqrt_f32_z(...) __builtin_sve_svsqrt_f32_z(__VA_ARGS__)
+#define svsqrt_f16_z(...) __builtin_sve_svsqrt_f16_z(__VA_ARGS__)
+#define svst1_u8(...) __builtin_sve_svst1_u8(__VA_ARGS__)
+#define svst1_u32(...) __builtin_sve_svst1_u32(__VA_ARGS__)
+#define svst1_u64(...) __builtin_sve_svst1_u64(__VA_ARGS__)
+#define svst1_u16(...) __builtin_sve_svst1_u16(__VA_ARGS__)
+#define svst1_s8(...) __builtin_sve_svst1_s8(__VA_ARGS__)
+#define svst1_f64(...) __builtin_sve_svst1_f64(__VA_ARGS__)
+#define svst1_f32(...) __builtin_sve_svst1_f32(__VA_ARGS__)
+#define svst1_f16(...) __builtin_sve_svst1_f16(__VA_ARGS__)
+#define svst1_s32(...) __builtin_sve_svst1_s32(__VA_ARGS__)
+#define svst1_s64(...) __builtin_sve_svst1_s64(__VA_ARGS__)
+#define svst1_s16(...) __builtin_sve_svst1_s16(__VA_ARGS__)
+#define svst1_scatter_u32base_index_u32(...) __builtin_sve_svst1_scatter_u32base_index_u32(__VA_ARGS__)
+#define svst1_scatter_u64base_index_u64(...) __builtin_sve_svst1_scatter_u64base_index_u64(__VA_ARGS__)
+#define svst1_scatter_u64base_index_f64(...) __builtin_sve_svst1_scatter_u64base_index_f64(__VA_ARGS__)
+#define svst1_scatter_u32base_index_f32(...) __builtin_sve_svst1_scatter_u32base_index_f32(__VA_ARGS__)
+#define svst1_scatter_u32base_index_s32(...) __builtin_sve_svst1_scatter_u32base_index_s32(__VA_ARGS__)
+#define svst1_scatter_u64base_index_s64(...) __builtin_sve_svst1_scatter_u64base_index_s64(__VA_ARGS__)
+#define svst1_scatter_u32base_offset_u32(...) __builtin_sve_svst1_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svst1_scatter_u64base_offset_u64(...) __builtin_sve_svst1_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1_scatter_u64base_offset_f64(...) __builtin_sve_svst1_scatter_u64base_offset_f64(__VA_ARGS__)
+#define svst1_scatter_u32base_offset_f32(...) __builtin_sve_svst1_scatter_u32base_offset_f32(__VA_ARGS__)
+#define svst1_scatter_u32base_offset_s32(...) __builtin_sve_svst1_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svst1_scatter_u64base_offset_s64(...) __builtin_sve_svst1_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1_scatter_u32base_u32(...) __builtin_sve_svst1_scatter_u32base_u32(__VA_ARGS__)
+#define svst1_scatter_u64base_u64(...) __builtin_sve_svst1_scatter_u64base_u64(__VA_ARGS__)
+#define svst1_scatter_u64base_f64(...) __builtin_sve_svst1_scatter_u64base_f64(__VA_ARGS__)
+#define svst1_scatter_u32base_f32(...) __builtin_sve_svst1_scatter_u32base_f32(__VA_ARGS__)
+#define svst1_scatter_u32base_s32(...) __builtin_sve_svst1_scatter_u32base_s32(__VA_ARGS__)
+#define svst1_scatter_u64base_s64(...) __builtin_sve_svst1_scatter_u64base_s64(__VA_ARGS__)
+#define svst1_scatter_s32index_u32(...) __builtin_sve_svst1_scatter_s32index_u32(__VA_ARGS__)
+#define svst1_scatter_s32index_f32(...) __builtin_sve_svst1_scatter_s32index_f32(__VA_ARGS__)
+#define svst1_scatter_s32index_s32(...) __builtin_sve_svst1_scatter_s32index_s32(__VA_ARGS__)
+#define svst1_scatter_u32index_u32(...) __builtin_sve_svst1_scatter_u32index_u32(__VA_ARGS__)
+#define svst1_scatter_u32index_f32(...) __builtin_sve_svst1_scatter_u32index_f32(__VA_ARGS__)
+#define svst1_scatter_u32index_s32(...) __builtin_sve_svst1_scatter_u32index_s32(__VA_ARGS__)
+#define svst1_scatter_s64index_u64(...) __builtin_sve_svst1_scatter_s64index_u64(__VA_ARGS__)
+#define svst1_scatter_s64index_f64(...) __builtin_sve_svst1_scatter_s64index_f64(__VA_ARGS__)
+#define svst1_scatter_s64index_s64(...) __builtin_sve_svst1_scatter_s64index_s64(__VA_ARGS__)
+#define svst1_scatter_u64index_u64(...) __builtin_sve_svst1_scatter_u64index_u64(__VA_ARGS__)
+#define svst1_scatter_u64index_f64(...) __builtin_sve_svst1_scatter_u64index_f64(__VA_ARGS__)
+#define svst1_scatter_u64index_s64(...) __builtin_sve_svst1_scatter_u64index_s64(__VA_ARGS__)
+#define svst1_scatter_s32offset_u32(...) __builtin_sve_svst1_scatter_s32offset_u32(__VA_ARGS__)
+#define svst1_scatter_s32offset_f32(...) __builtin_sve_svst1_scatter_s32offset_f32(__VA_ARGS__)
+#define svst1_scatter_s32offset_s32(...) __builtin_sve_svst1_scatter_s32offset_s32(__VA_ARGS__)
+#define svst1_scatter_u32offset_u32(...) __builtin_sve_svst1_scatter_u32offset_u32(__VA_ARGS__)
+#define svst1_scatter_u32offset_f32(...) __builtin_sve_svst1_scatter_u32offset_f32(__VA_ARGS__)
+#define svst1_scatter_u32offset_s32(...) __builtin_sve_svst1_scatter_u32offset_s32(__VA_ARGS__)
+#define svst1_scatter_s64offset_u64(...) __builtin_sve_svst1_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1_scatter_s64offset_f64(...) __builtin_sve_svst1_scatter_s64offset_f64(__VA_ARGS__)
+#define svst1_scatter_s64offset_s64(...) __builtin_sve_svst1_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1_scatter_u64offset_u64(...) __builtin_sve_svst1_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1_scatter_u64offset_f64(...) __builtin_sve_svst1_scatter_u64offset_f64(__VA_ARGS__)
+#define svst1_scatter_u64offset_s64(...) __builtin_sve_svst1_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1_vnum_u8(...) __builtin_sve_svst1_vnum_u8(__VA_ARGS__)
+#define svst1_vnum_u32(...) __builtin_sve_svst1_vnum_u32(__VA_ARGS__)
+#define svst1_vnum_u64(...) __builtin_sve_svst1_vnum_u64(__VA_ARGS__)
+#define svst1_vnum_u16(...) __builtin_sve_svst1_vnum_u16(__VA_ARGS__)
+#define svst1_vnum_s8(...) __builtin_sve_svst1_vnum_s8(__VA_ARGS__)
+#define svst1_vnum_f64(...) __builtin_sve_svst1_vnum_f64(__VA_ARGS__)
+#define svst1_vnum_f32(...) __builtin_sve_svst1_vnum_f32(__VA_ARGS__)
+#define svst1_vnum_f16(...) __builtin_sve_svst1_vnum_f16(__VA_ARGS__)
+#define svst1_vnum_s32(...) __builtin_sve_svst1_vnum_s32(__VA_ARGS__)
+#define svst1_vnum_s64(...) __builtin_sve_svst1_vnum_s64(__VA_ARGS__)
+#define svst1_vnum_s16(...) __builtin_sve_svst1_vnum_s16(__VA_ARGS__)
+#define svst1b_s32(...) __builtin_sve_svst1b_s32(__VA_ARGS__)
+#define svst1b_s64(...) __builtin_sve_svst1b_s64(__VA_ARGS__)
+#define svst1b_s16(...) __builtin_sve_svst1b_s16(__VA_ARGS__)
+#define svst1b_u32(...) __builtin_sve_svst1b_u32(__VA_ARGS__)
+#define svst1b_u64(...) __builtin_sve_svst1b_u64(__VA_ARGS__)
+#define svst1b_u16(...) __builtin_sve_svst1b_u16(__VA_ARGS__)
+#define svst1b_scatter_u32base_offset_u32(...) __builtin_sve_svst1b_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svst1b_scatter_u64base_offset_u64(...) __builtin_sve_svst1b_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1b_scatter_u32base_offset_s32(...) __builtin_sve_svst1b_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svst1b_scatter_u64base_offset_s64(...) __builtin_sve_svst1b_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1b_scatter_u32base_u32(...) __builtin_sve_svst1b_scatter_u32base_u32(__VA_ARGS__)
+#define svst1b_scatter_u64base_u64(...) __builtin_sve_svst1b_scatter_u64base_u64(__VA_ARGS__)
+#define svst1b_scatter_u32base_s32(...) __builtin_sve_svst1b_scatter_u32base_s32(__VA_ARGS__)
+#define svst1b_scatter_u64base_s64(...) __builtin_sve_svst1b_scatter_u64base_s64(__VA_ARGS__)
+#define svst1b_scatter_s32offset_s32(...) __builtin_sve_svst1b_scatter_s32offset_s32(__VA_ARGS__)
+#define svst1b_scatter_s32offset_u32(...) __builtin_sve_svst1b_scatter_s32offset_u32(__VA_ARGS__)
+#define svst1b_scatter_u32offset_s32(...) __builtin_sve_svst1b_scatter_u32offset_s32(__VA_ARGS__)
+#define svst1b_scatter_u32offset_u32(...) __builtin_sve_svst1b_scatter_u32offset_u32(__VA_ARGS__)
+#define svst1b_scatter_s64offset_s64(...) __builtin_sve_svst1b_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1b_scatter_s64offset_u64(...) __builtin_sve_svst1b_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1b_scatter_u64offset_s64(...) __builtin_sve_svst1b_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1b_scatter_u64offset_u64(...) __builtin_sve_svst1b_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1b_vnum_s32(...) __builtin_sve_svst1b_vnum_s32(__VA_ARGS__)
+#define svst1b_vnum_s64(...) __builtin_sve_svst1b_vnum_s64(__VA_ARGS__)
+#define svst1b_vnum_s16(...) __builtin_sve_svst1b_vnum_s16(__VA_ARGS__)
+#define svst1b_vnum_u32(...) __builtin_sve_svst1b_vnum_u32(__VA_ARGS__)
+#define svst1b_vnum_u64(...) __builtin_sve_svst1b_vnum_u64(__VA_ARGS__)
+#define svst1b_vnum_u16(...) __builtin_sve_svst1b_vnum_u16(__VA_ARGS__)
+#define svst1h_s32(...) __builtin_sve_svst1h_s32(__VA_ARGS__)
+#define svst1h_s64(...) __builtin_sve_svst1h_s64(__VA_ARGS__)
+#define svst1h_u32(...) __builtin_sve_svst1h_u32(__VA_ARGS__)
+#define svst1h_u64(...) __builtin_sve_svst1h_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_index_u32(...) __builtin_sve_svst1h_scatter_u32base_index_u32(__VA_ARGS__)
+#define svst1h_scatter_u64base_index_u64(...) __builtin_sve_svst1h_scatter_u64base_index_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_index_s32(...) __builtin_sve_svst1h_scatter_u32base_index_s32(__VA_ARGS__)
+#define svst1h_scatter_u64base_index_s64(...) __builtin_sve_svst1h_scatter_u64base_index_s64(__VA_ARGS__)
+#define svst1h_scatter_u32base_offset_u32(...) __builtin_sve_svst1h_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svst1h_scatter_u64base_offset_u64(...) __builtin_sve_svst1h_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_offset_s32(...) __builtin_sve_svst1h_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svst1h_scatter_u64base_offset_s64(...) __builtin_sve_svst1h_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1h_scatter_u32base_u32(...) __builtin_sve_svst1h_scatter_u32base_u32(__VA_ARGS__)
+#define svst1h_scatter_u64base_u64(...) __builtin_sve_svst1h_scatter_u64base_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_s32(...) __builtin_sve_svst1h_scatter_u32base_s32(__VA_ARGS__)
+#define svst1h_scatter_u64base_s64(...) __builtin_sve_svst1h_scatter_u64base_s64(__VA_ARGS__)
+#define svst1h_scatter_s32index_s32(...) __builtin_sve_svst1h_scatter_s32index_s32(__VA_ARGS__)
+#define svst1h_scatter_s32index_u32(...) __builtin_sve_svst1h_scatter_s32index_u32(__VA_ARGS__)
+#define svst1h_scatter_u32index_s32(...) __builtin_sve_svst1h_scatter_u32index_s32(__VA_ARGS__)
+#define svst1h_scatter_u32index_u32(...) __builtin_sve_svst1h_scatter_u32index_u32(__VA_ARGS__)
+#define svst1h_scatter_s64index_s64(...) __builtin_sve_svst1h_scatter_s64index_s64(__VA_ARGS__)
+#define svst1h_scatter_s64index_u64(...) __builtin_sve_svst1h_scatter_s64index_u64(__VA_ARGS__)
+#define svst1h_scatter_u64index_s64(...) __builtin_sve_svst1h_scatter_u64index_s64(__VA_ARGS__)
+#define svst1h_scatter_u64index_u64(...) __builtin_sve_svst1h_scatter_u64index_u64(__VA_ARGS__)
+#define svst1h_scatter_s32offset_s32(...) __builtin_sve_svst1h_scatter_s32offset_s32(__VA_ARGS__)
+#define svst1h_scatter_s32offset_u32(...) __builtin_sve_svst1h_scatter_s32offset_u32(__VA_ARGS__)
+#define svst1h_scatter_u32offset_s32(...) __builtin_sve_svst1h_scatter_u32offset_s32(__VA_ARGS__)
+#define svst1h_scatter_u32offset_u32(...) __builtin_sve_svst1h_scatter_u32offset_u32(__VA_ARGS__)
+#define svst1h_scatter_s64offset_s64(...) __builtin_sve_svst1h_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1h_scatter_s64offset_u64(...) __builtin_sve_svst1h_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1h_scatter_u64offset_s64(...) __builtin_sve_svst1h_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1h_scatter_u64offset_u64(...) __builtin_sve_svst1h_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1h_vnum_s32(...) __builtin_sve_svst1h_vnum_s32(__VA_ARGS__)
+#define svst1h_vnum_s64(...) __builtin_sve_svst1h_vnum_s64(__VA_ARGS__)
+#define svst1h_vnum_u32(...) __builtin_sve_svst1h_vnum_u32(__VA_ARGS__)
+#define svst1h_vnum_u64(...) __builtin_sve_svst1h_vnum_u64(__VA_ARGS__)
+#define svst1w_s64(...) __builtin_sve_svst1w_s64(__VA_ARGS__)
+#define svst1w_u64(...) __builtin_sve_svst1w_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_index_u64(...) __builtin_sve_svst1w_scatter_u64base_index_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_index_s64(...) __builtin_sve_svst1w_scatter_u64base_index_s64(__VA_ARGS__)
+#define svst1w_scatter_u64base_offset_u64(...) __builtin_sve_svst1w_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_offset_s64(...) __builtin_sve_svst1w_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1w_scatter_u64base_u64(...) __builtin_sve_svst1w_scatter_u64base_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_s64(...) __builtin_sve_svst1w_scatter_u64base_s64(__VA_ARGS__)
+#define svst1w_scatter_s64index_s64(...) __builtin_sve_svst1w_scatter_s64index_s64(__VA_ARGS__)
+#define svst1w_scatter_s64index_u64(...) __builtin_sve_svst1w_scatter_s64index_u64(__VA_ARGS__)
+#define svst1w_scatter_u64index_s64(...) __builtin_sve_svst1w_scatter_u64index_s64(__VA_ARGS__)
+#define svst1w_scatter_u64index_u64(...) __builtin_sve_svst1w_scatter_u64index_u64(__VA_ARGS__)
+#define svst1w_scatter_s64offset_s64(...) __builtin_sve_svst1w_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1w_scatter_s64offset_u64(...) __builtin_sve_svst1w_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1w_scatter_u64offset_s64(...) __builtin_sve_svst1w_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1w_scatter_u64offset_u64(...) __builtin_sve_svst1w_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1w_vnum_s64(...) __builtin_sve_svst1w_vnum_s64(__VA_ARGS__)
+#define svst1w_vnum_u64(...) __builtin_sve_svst1w_vnum_u64(__VA_ARGS__)
+#define svst2_u8(...) __builtin_sve_svst2_u8(__VA_ARGS__)
+#define svst2_u32(...) __builtin_sve_svst2_u32(__VA_ARGS__)
+#define svst2_u64(...) __builtin_sve_svst2_u64(__VA_ARGS__)
+#define svst2_u16(...) __builtin_sve_svst2_u16(__VA_ARGS__)
+#define svst2_s8(...) __builtin_sve_svst2_s8(__VA_ARGS__)
+#define svst2_f64(...) __builtin_sve_svst2_f64(__VA_ARGS__)
+#define svst2_f32(...) __builtin_sve_svst2_f32(__VA_ARGS__)
+#define svst2_f16(...) __builtin_sve_svst2_f16(__VA_ARGS__)
+#define svst2_s32(...) __builtin_sve_svst2_s32(__VA_ARGS__)
+#define svst2_s64(...) __builtin_sve_svst2_s64(__VA_ARGS__)
+#define svst2_s16(...) __builtin_sve_svst2_s16(__VA_ARGS__)
+#define svst2_vnum_u8(...) __builtin_sve_svst2_vnum_u8(__VA_ARGS__)
+#define svst2_vnum_u32(...) __builtin_sve_svst2_vnum_u32(__VA_ARGS__)
+#define svst2_vnum_u64(...) __builtin_sve_svst2_vnum_u64(__VA_ARGS__)
+#define svst2_vnum_u16(...) __builtin_sve_svst2_vnum_u16(__VA_ARGS__)
+#define svst2_vnum_s8(...) __builtin_sve_svst2_vnum_s8(__VA_ARGS__)
+#define svst2_vnum_f64(...) __builtin_sve_svst2_vnum_f64(__VA_ARGS__)
+#define svst2_vnum_f32(...) __builtin_sve_svst2_vnum_f32(__VA_ARGS__)
+#define svst2_vnum_f16(...) __builtin_sve_svst2_vnum_f16(__VA_ARGS__)
+#define svst2_vnum_s32(...) __builtin_sve_svst2_vnum_s32(__VA_ARGS__)
+#define svst2_vnum_s64(...) __builtin_sve_svst2_vnum_s64(__VA_ARGS__)
+#define svst2_vnum_s16(...) __builtin_sve_svst2_vnum_s16(__VA_ARGS__)
+#define svst3_u8(...) __builtin_sve_svst3_u8(__VA_ARGS__)
+#define svst3_u32(...) __builtin_sve_svst3_u32(__VA_ARGS__)
+#define svst3_u64(...) __builtin_sve_svst3_u64(__VA_ARGS__)
+#define svst3_u16(...) __builtin_sve_svst3_u16(__VA_ARGS__)
+#define svst3_s8(...) __builtin_sve_svst3_s8(__VA_ARGS__)
+#define svst3_f64(...) __builtin_sve_svst3_f64(__VA_ARGS__)
+#define svst3_f32(...) __builtin_sve_svst3_f32(__VA_ARGS__)
+#define svst3_f16(...) __builtin_sve_svst3_f16(__VA_ARGS__)
+#define svst3_s32(...) __builtin_sve_svst3_s32(__VA_ARGS__)
+#define svst3_s64(...) __builtin_sve_svst3_s64(__VA_ARGS__)
+#define svst3_s16(...) __builtin_sve_svst3_s16(__VA_ARGS__)
+#define svst3_vnum_u8(...) __builtin_sve_svst3_vnum_u8(__VA_ARGS__)
+#define svst3_vnum_u32(...) __builtin_sve_svst3_vnum_u32(__VA_ARGS__)
+#define svst3_vnum_u64(...) __builtin_sve_svst3_vnum_u64(__VA_ARGS__)
+#define svst3_vnum_u16(...) __builtin_sve_svst3_vnum_u16(__VA_ARGS__)
+#define svst3_vnum_s8(...) __builtin_sve_svst3_vnum_s8(__VA_ARGS__)
+#define svst3_vnum_f64(...) __builtin_sve_svst3_vnum_f64(__VA_ARGS__)
+#define svst3_vnum_f32(...) __builtin_sve_svst3_vnum_f32(__VA_ARGS__)
+#define svst3_vnum_f16(...) __builtin_sve_svst3_vnum_f16(__VA_ARGS__)
+#define svst3_vnum_s32(...) __builtin_sve_svst3_vnum_s32(__VA_ARGS__)
+#define svst3_vnum_s64(...) __builtin_sve_svst3_vnum_s64(__VA_ARGS__)
+#define svst3_vnum_s16(...) __builtin_sve_svst3_vnum_s16(__VA_ARGS__)
+#define svst4_u8(...) __builtin_sve_svst4_u8(__VA_ARGS__)
+#define svst4_u32(...) __builtin_sve_svst4_u32(__VA_ARGS__)
+#define svst4_u64(...) __builtin_sve_svst4_u64(__VA_ARGS__)
+#define svst4_u16(...) __builtin_sve_svst4_u16(__VA_ARGS__)
+#define svst4_s8(...) __builtin_sve_svst4_s8(__VA_ARGS__)
+#define svst4_f64(...) __builtin_sve_svst4_f64(__VA_ARGS__)
+#define svst4_f32(...) __builtin_sve_svst4_f32(__VA_ARGS__)
+#define svst4_f16(...) __builtin_sve_svst4_f16(__VA_ARGS__)
+#define svst4_s32(...) __builtin_sve_svst4_s32(__VA_ARGS__)
+#define svst4_s64(...) __builtin_sve_svst4_s64(__VA_ARGS__)
+#define svst4_s16(...) __builtin_sve_svst4_s16(__VA_ARGS__)
+#define svst4_vnum_u8(...) __builtin_sve_svst4_vnum_u8(__VA_ARGS__)
+#define svst4_vnum_u32(...) __builtin_sve_svst4_vnum_u32(__VA_ARGS__)
+#define svst4_vnum_u64(...) __builtin_sve_svst4_vnum_u64(__VA_ARGS__)
+#define svst4_vnum_u16(...) __builtin_sve_svst4_vnum_u16(__VA_ARGS__)
+#define svst4_vnum_s8(...) __builtin_sve_svst4_vnum_s8(__VA_ARGS__)
+#define svst4_vnum_f64(...) __builtin_sve_svst4_vnum_f64(__VA_ARGS__)
+#define svst4_vnum_f32(...) __builtin_sve_svst4_vnum_f32(__VA_ARGS__)
+#define svst4_vnum_f16(...) __builtin_sve_svst4_vnum_f16(__VA_ARGS__)
+#define svst4_vnum_s32(...) __builtin_sve_svst4_vnum_s32(__VA_ARGS__)
+#define svst4_vnum_s64(...) __builtin_sve_svst4_vnum_s64(__VA_ARGS__)
+#define svst4_vnum_s16(...) __builtin_sve_svst4_vnum_s16(__VA_ARGS__)
+#define svstnt1_u8(...) __builtin_sve_svstnt1_u8(__VA_ARGS__)
+#define svstnt1_u32(...) __builtin_sve_svstnt1_u32(__VA_ARGS__)
+#define svstnt1_u64(...) __builtin_sve_svstnt1_u64(__VA_ARGS__)
+#define svstnt1_u16(...) __builtin_sve_svstnt1_u16(__VA_ARGS__)
+#define svstnt1_s8(...) __builtin_sve_svstnt1_s8(__VA_ARGS__)
+#define svstnt1_f64(...) __builtin_sve_svstnt1_f64(__VA_ARGS__)
+#define svstnt1_f32(...) __builtin_sve_svstnt1_f32(__VA_ARGS__)
+#define svstnt1_f16(...) __builtin_sve_svstnt1_f16(__VA_ARGS__)
+#define svstnt1_s32(...) __builtin_sve_svstnt1_s32(__VA_ARGS__)
+#define svstnt1_s64(...) __builtin_sve_svstnt1_s64(__VA_ARGS__)
+#define svstnt1_s16(...) __builtin_sve_svstnt1_s16(__VA_ARGS__)
+#define svstnt1_vnum_u8(...) __builtin_sve_svstnt1_vnum_u8(__VA_ARGS__)
+#define svstnt1_vnum_u32(...) __builtin_sve_svstnt1_vnum_u32(__VA_ARGS__)
+#define svstnt1_vnum_u64(...) __builtin_sve_svstnt1_vnum_u64(__VA_ARGS__)
+#define svstnt1_vnum_u16(...) __builtin_sve_svstnt1_vnum_u16(__VA_ARGS__)
+#define svstnt1_vnum_s8(...) __builtin_sve_svstnt1_vnum_s8(__VA_ARGS__)
+#define svstnt1_vnum_f64(...) __builtin_sve_svstnt1_vnum_f64(__VA_ARGS__)
+#define svstnt1_vnum_f32(...) __builtin_sve_svstnt1_vnum_f32(__VA_ARGS__)
+#define svstnt1_vnum_f16(...) __builtin_sve_svstnt1_vnum_f16(__VA_ARGS__)
+#define svstnt1_vnum_s32(...) __builtin_sve_svstnt1_vnum_s32(__VA_ARGS__)
+#define svstnt1_vnum_s64(...) __builtin_sve_svstnt1_vnum_s64(__VA_ARGS__)
+#define svstnt1_vnum_s16(...) __builtin_sve_svstnt1_vnum_s16(__VA_ARGS__)
+#define svsub_n_f64_m(...) __builtin_sve_svsub_n_f64_m(__VA_ARGS__)
+#define svsub_n_f32_m(...) __builtin_sve_svsub_n_f32_m(__VA_ARGS__)
+#define svsub_n_f16_m(...) __builtin_sve_svsub_n_f16_m(__VA_ARGS__)
+#define svsub_n_f64_x(...) __builtin_sve_svsub_n_f64_x(__VA_ARGS__)
+#define svsub_n_f32_x(...) __builtin_sve_svsub_n_f32_x(__VA_ARGS__)
+#define svsub_n_f16_x(...) __builtin_sve_svsub_n_f16_x(__VA_ARGS__)
+#define svsub_n_f64_z(...) __builtin_sve_svsub_n_f64_z(__VA_ARGS__)
+#define svsub_n_f32_z(...) __builtin_sve_svsub_n_f32_z(__VA_ARGS__)
+#define svsub_n_f16_z(...) __builtin_sve_svsub_n_f16_z(__VA_ARGS__)
+#define svsub_n_u8_m(...) __builtin_sve_svsub_n_u8_m(__VA_ARGS__)
+#define svsub_n_u32_m(...) __builtin_sve_svsub_n_u32_m(__VA_ARGS__)
+#define svsub_n_u64_m(...) __builtin_sve_svsub_n_u64_m(__VA_ARGS__)
+#define svsub_n_u16_m(...) __builtin_sve_svsub_n_u16_m(__VA_ARGS__)
+#define svsub_n_s8_m(...) __builtin_sve_svsub_n_s8_m(__VA_ARGS__)
+#define svsub_n_s32_m(...) __builtin_sve_svsub_n_s32_m(__VA_ARGS__)
+#define svsub_n_s64_m(...) __builtin_sve_svsub_n_s64_m(__VA_ARGS__)
+#define svsub_n_s16_m(...) __builtin_sve_svsub_n_s16_m(__VA_ARGS__)
+#define svsub_n_u8_x(...) __builtin_sve_svsub_n_u8_x(__VA_ARGS__)
+#define svsub_n_u32_x(...) __builtin_sve_svsub_n_u32_x(__VA_ARGS__)
+#define svsub_n_u64_x(...) __builtin_sve_svsub_n_u64_x(__VA_ARGS__)
+#define svsub_n_u16_x(...) __builtin_sve_svsub_n_u16_x(__VA_ARGS__)
+#define svsub_n_s8_x(...) __builtin_sve_svsub_n_s8_x(__VA_ARGS__)
+#define svsub_n_s32_x(...) __builtin_sve_svsub_n_s32_x(__VA_ARGS__)
+#define svsub_n_s64_x(...) __builtin_sve_svsub_n_s64_x(__VA_ARGS__)
+#define svsub_n_s16_x(...) __builtin_sve_svsub_n_s16_x(__VA_ARGS__)
+#define svsub_n_u8_z(...) __builtin_sve_svsub_n_u8_z(__VA_ARGS__)
+#define svsub_n_u32_z(...) __builtin_sve_svsub_n_u32_z(__VA_ARGS__)
+#define svsub_n_u64_z(...) __builtin_sve_svsub_n_u64_z(__VA_ARGS__)
+#define svsub_n_u16_z(...) __builtin_sve_svsub_n_u16_z(__VA_ARGS__)
+#define svsub_n_s8_z(...) __builtin_sve_svsub_n_s8_z(__VA_ARGS__)
+#define svsub_n_s32_z(...) __builtin_sve_svsub_n_s32_z(__VA_ARGS__)
+#define svsub_n_s64_z(...) __builtin_sve_svsub_n_s64_z(__VA_ARGS__)
+#define svsub_n_s16_z(...) __builtin_sve_svsub_n_s16_z(__VA_ARGS__)
+#define svsub_f64_m(...) __builtin_sve_svsub_f64_m(__VA_ARGS__)
+#define svsub_f32_m(...) __builtin_sve_svsub_f32_m(__VA_ARGS__)
+#define svsub_f16_m(...) __builtin_sve_svsub_f16_m(__VA_ARGS__)
+#define svsub_f64_x(...) __builtin_sve_svsub_f64_x(__VA_ARGS__)
+#define svsub_f32_x(...) __builtin_sve_svsub_f32_x(__VA_ARGS__)
+#define svsub_f16_x(...) __builtin_sve_svsub_f16_x(__VA_ARGS__)
+#define svsub_f64_z(...) __builtin_sve_svsub_f64_z(__VA_ARGS__)
+#define svsub_f32_z(...) __builtin_sve_svsub_f32_z(__VA_ARGS__)
+#define svsub_f16_z(...) __builtin_sve_svsub_f16_z(__VA_ARGS__)
+#define svsub_u8_m(...) __builtin_sve_svsub_u8_m(__VA_ARGS__)
+#define svsub_u32_m(...) __builtin_sve_svsub_u32_m(__VA_ARGS__)
+#define svsub_u64_m(...) __builtin_sve_svsub_u64_m(__VA_ARGS__)
+#define svsub_u16_m(...) __builtin_sve_svsub_u16_m(__VA_ARGS__)
+#define svsub_s8_m(...) __builtin_sve_svsub_s8_m(__VA_ARGS__)
+#define svsub_s32_m(...) __builtin_sve_svsub_s32_m(__VA_ARGS__)
+#define svsub_s64_m(...) __builtin_sve_svsub_s64_m(__VA_ARGS__)
+#define svsub_s16_m(...) __builtin_sve_svsub_s16_m(__VA_ARGS__)
+#define svsub_u8_x(...) __builtin_sve_svsub_u8_x(__VA_ARGS__)
+#define svsub_u32_x(...) __builtin_sve_svsub_u32_x(__VA_ARGS__)
+#define svsub_u64_x(...) __builtin_sve_svsub_u64_x(__VA_ARGS__)
+#define svsub_u16_x(...) __builtin_sve_svsub_u16_x(__VA_ARGS__)
+#define svsub_s8_x(...) __builtin_sve_svsub_s8_x(__VA_ARGS__)
+#define svsub_s32_x(...) __builtin_sve_svsub_s32_x(__VA_ARGS__)
+#define svsub_s64_x(...) __builtin_sve_svsub_s64_x(__VA_ARGS__)
+#define svsub_s16_x(...) __builtin_sve_svsub_s16_x(__VA_ARGS__)
+#define svsub_u8_z(...) __builtin_sve_svsub_u8_z(__VA_ARGS__)
+#define svsub_u32_z(...) __builtin_sve_svsub_u32_z(__VA_ARGS__)
+#define svsub_u64_z(...) __builtin_sve_svsub_u64_z(__VA_ARGS__)
+#define svsub_u16_z(...) __builtin_sve_svsub_u16_z(__VA_ARGS__)
+#define svsub_s8_z(...) __builtin_sve_svsub_s8_z(__VA_ARGS__)
+#define svsub_s32_z(...) __builtin_sve_svsub_s32_z(__VA_ARGS__)
+#define svsub_s64_z(...) __builtin_sve_svsub_s64_z(__VA_ARGS__)
+#define svsub_s16_z(...) __builtin_sve_svsub_s16_z(__VA_ARGS__)
+#define svsubr_n_f64_m(...) __builtin_sve_svsubr_n_f64_m(__VA_ARGS__)
+#define svsubr_n_f32_m(...) __builtin_sve_svsubr_n_f32_m(__VA_ARGS__)
+#define svsubr_n_f16_m(...) __builtin_sve_svsubr_n_f16_m(__VA_ARGS__)
+#define svsubr_n_f64_x(...) __builtin_sve_svsubr_n_f64_x(__VA_ARGS__)
+#define svsubr_n_f32_x(...) __builtin_sve_svsubr_n_f32_x(__VA_ARGS__)
+#define svsubr_n_f16_x(...) __builtin_sve_svsubr_n_f16_x(__VA_ARGS__)
+#define svsubr_n_f64_z(...) __builtin_sve_svsubr_n_f64_z(__VA_ARGS__)
+#define svsubr_n_f32_z(...) __builtin_sve_svsubr_n_f32_z(__VA_ARGS__)
+#define svsubr_n_f16_z(...) __builtin_sve_svsubr_n_f16_z(__VA_ARGS__)
+#define svsubr_n_u8_m(...) __builtin_sve_svsubr_n_u8_m(__VA_ARGS__)
+#define svsubr_n_u32_m(...) __builtin_sve_svsubr_n_u32_m(__VA_ARGS__)
+#define svsubr_n_u64_m(...) __builtin_sve_svsubr_n_u64_m(__VA_ARGS__)
+#define svsubr_n_u16_m(...) __builtin_sve_svsubr_n_u16_m(__VA_ARGS__)
+#define svsubr_n_s8_m(...) __builtin_sve_svsubr_n_s8_m(__VA_ARGS__)
+#define svsubr_n_s32_m(...) __builtin_sve_svsubr_n_s32_m(__VA_ARGS__)
+#define svsubr_n_s64_m(...) __builtin_sve_svsubr_n_s64_m(__VA_ARGS__)
+#define svsubr_n_s16_m(...) __builtin_sve_svsubr_n_s16_m(__VA_ARGS__)
+#define svsubr_n_u8_x(...) __builtin_sve_svsubr_n_u8_x(__VA_ARGS__)
+#define svsubr_n_u32_x(...) __builtin_sve_svsubr_n_u32_x(__VA_ARGS__)
+#define svsubr_n_u64_x(...) __builtin_sve_svsubr_n_u64_x(__VA_ARGS__)
+#define svsubr_n_u16_x(...) __builtin_sve_svsubr_n_u16_x(__VA_ARGS__)
+#define svsubr_n_s8_x(...) __builtin_sve_svsubr_n_s8_x(__VA_ARGS__)
+#define svsubr_n_s32_x(...) __builtin_sve_svsubr_n_s32_x(__VA_ARGS__)
+#define svsubr_n_s64_x(...) __builtin_sve_svsubr_n_s64_x(__VA_ARGS__)
+#define svsubr_n_s16_x(...) __builtin_sve_svsubr_n_s16_x(__VA_ARGS__)
+#define svsubr_n_u8_z(...) __builtin_sve_svsubr_n_u8_z(__VA_ARGS__)
+#define svsubr_n_u32_z(...) __builtin_sve_svsubr_n_u32_z(__VA_ARGS__)
+#define svsubr_n_u64_z(...) __builtin_sve_svsubr_n_u64_z(__VA_ARGS__)
+#define svsubr_n_u16_z(...) __builtin_sve_svsubr_n_u16_z(__VA_ARGS__)
+#define svsubr_n_s8_z(...) __builtin_sve_svsubr_n_s8_z(__VA_ARGS__)
+#define svsubr_n_s32_z(...) __builtin_sve_svsubr_n_s32_z(__VA_ARGS__)
+#define svsubr_n_s64_z(...) __builtin_sve_svsubr_n_s64_z(__VA_ARGS__)
+#define svsubr_n_s16_z(...) __builtin_sve_svsubr_n_s16_z(__VA_ARGS__)
+#define svsubr_f64_m(...) __builtin_sve_svsubr_f64_m(__VA_ARGS__)
+#define svsubr_f32_m(...) __builtin_sve_svsubr_f32_m(__VA_ARGS__)
+#define svsubr_f16_m(...) __builtin_sve_svsubr_f16_m(__VA_ARGS__)
+#define svsubr_f64_x(...) __builtin_sve_svsubr_f64_x(__VA_ARGS__)
+#define svsubr_f32_x(...) __builtin_sve_svsubr_f32_x(__VA_ARGS__)
+#define svsubr_f16_x(...) __builtin_sve_svsubr_f16_x(__VA_ARGS__)
+#define svsubr_f64_z(...) __builtin_sve_svsubr_f64_z(__VA_ARGS__)
+#define svsubr_f32_z(...) __builtin_sve_svsubr_f32_z(__VA_ARGS__)
+#define svsubr_f16_z(...) __builtin_sve_svsubr_f16_z(__VA_ARGS__)
+#define svsubr_u8_m(...) __builtin_sve_svsubr_u8_m(__VA_ARGS__)
+#define svsubr_u32_m(...) __builtin_sve_svsubr_u32_m(__VA_ARGS__)
+#define svsubr_u64_m(...) __builtin_sve_svsubr_u64_m(__VA_ARGS__)
+#define svsubr_u16_m(...) __builtin_sve_svsubr_u16_m(__VA_ARGS__)
+#define svsubr_s8_m(...) __builtin_sve_svsubr_s8_m(__VA_ARGS__)
+#define svsubr_s32_m(...) __builtin_sve_svsubr_s32_m(__VA_ARGS__)
+#define svsubr_s64_m(...) __builtin_sve_svsubr_s64_m(__VA_ARGS__)
+#define svsubr_s16_m(...) __builtin_sve_svsubr_s16_m(__VA_ARGS__)
+#define svsubr_u8_x(...) __builtin_sve_svsubr_u8_x(__VA_ARGS__)
+#define svsubr_u32_x(...) __builtin_sve_svsubr_u32_x(__VA_ARGS__)
+#define svsubr_u64_x(...) __builtin_sve_svsubr_u64_x(__VA_ARGS__)
+#define svsubr_u16_x(...) __builtin_sve_svsubr_u16_x(__VA_ARGS__)
+#define svsubr_s8_x(...) __builtin_sve_svsubr_s8_x(__VA_ARGS__)
+#define svsubr_s32_x(...) __builtin_sve_svsubr_s32_x(__VA_ARGS__)
+#define svsubr_s64_x(...) __builtin_sve_svsubr_s64_x(__VA_ARGS__)
+#define svsubr_s16_x(...) __builtin_sve_svsubr_s16_x(__VA_ARGS__)
+#define svsubr_u8_z(...) __builtin_sve_svsubr_u8_z(__VA_ARGS__)
+#define svsubr_u32_z(...) __builtin_sve_svsubr_u32_z(__VA_ARGS__)
+#define svsubr_u64_z(...) __builtin_sve_svsubr_u64_z(__VA_ARGS__)
+#define svsubr_u16_z(...) __builtin_sve_svsubr_u16_z(__VA_ARGS__)
+#define svsubr_s8_z(...) __builtin_sve_svsubr_s8_z(__VA_ARGS__)
+#define svsubr_s32_z(...) __builtin_sve_svsubr_s32_z(__VA_ARGS__)
+#define svsubr_s64_z(...) __builtin_sve_svsubr_s64_z(__VA_ARGS__)
+#define svsubr_s16_z(...) __builtin_sve_svsubr_s16_z(__VA_ARGS__)
+#define svtbl_u8(...) __builtin_sve_svtbl_u8(__VA_ARGS__)
+#define svtbl_u32(...) __builtin_sve_svtbl_u32(__VA_ARGS__)
+#define svtbl_u64(...) __builtin_sve_svtbl_u64(__VA_ARGS__)
+#define svtbl_u16(...) __builtin_sve_svtbl_u16(__VA_ARGS__)
+#define svtbl_s8(...) __builtin_sve_svtbl_s8(__VA_ARGS__)
+#define svtbl_f64(...) __builtin_sve_svtbl_f64(__VA_ARGS__)
+#define svtbl_f32(...) __builtin_sve_svtbl_f32(__VA_ARGS__)
+#define svtbl_f16(...) __builtin_sve_svtbl_f16(__VA_ARGS__)
+#define svtbl_s32(...) __builtin_sve_svtbl_s32(__VA_ARGS__)
+#define svtbl_s64(...) __builtin_sve_svtbl_s64(__VA_ARGS__)
+#define svtbl_s16(...) __builtin_sve_svtbl_s16(__VA_ARGS__)
+#define svtmad_f64(...) __builtin_sve_svtmad_f64(__VA_ARGS__)
+#define svtmad_f32(...) __builtin_sve_svtmad_f32(__VA_ARGS__)
+#define svtmad_f16(...) __builtin_sve_svtmad_f16(__VA_ARGS__)
+#define svtrn1_u8(...) __builtin_sve_svtrn1_u8(__VA_ARGS__)
+#define svtrn1_u32(...) __builtin_sve_svtrn1_u32(__VA_ARGS__)
+#define svtrn1_u64(...) __builtin_sve_svtrn1_u64(__VA_ARGS__)
+#define svtrn1_u16(...) __builtin_sve_svtrn1_u16(__VA_ARGS__)
+#define svtrn1_s8(...) __builtin_sve_svtrn1_s8(__VA_ARGS__)
+#define svtrn1_f64(...) __builtin_sve_svtrn1_f64(__VA_ARGS__)
+#define svtrn1_f32(...) __builtin_sve_svtrn1_f32(__VA_ARGS__)
+#define svtrn1_f16(...) __builtin_sve_svtrn1_f16(__VA_ARGS__)
+#define svtrn1_s32(...) __builtin_sve_svtrn1_s32(__VA_ARGS__)
+#define svtrn1_s64(...) __builtin_sve_svtrn1_s64(__VA_ARGS__)
+#define svtrn1_s16(...) __builtin_sve_svtrn1_s16(__VA_ARGS__)
+#define svtrn1_b8(...) __builtin_sve_svtrn1_b8(__VA_ARGS__)
+#define svtrn1_b32(...) __builtin_sve_svtrn1_b32(__VA_ARGS__)
+#define svtrn1_b64(...) __builtin_sve_svtrn1_b64(__VA_ARGS__)
+#define svtrn1_b16(...) __builtin_sve_svtrn1_b16(__VA_ARGS__)
+#define svtrn2_u8(...) __builtin_sve_svtrn2_u8(__VA_ARGS__)
+#define svtrn2_u32(...) __builtin_sve_svtrn2_u32(__VA_ARGS__)
+#define svtrn2_u64(...) __builtin_sve_svtrn2_u64(__VA_ARGS__)
+#define svtrn2_u16(...) __builtin_sve_svtrn2_u16(__VA_ARGS__)
+#define svtrn2_s8(...) __builtin_sve_svtrn2_s8(__VA_ARGS__)
+#define svtrn2_f64(...) __builtin_sve_svtrn2_f64(__VA_ARGS__)
+#define svtrn2_f32(...) __builtin_sve_svtrn2_f32(__VA_ARGS__)
+#define svtrn2_f16(...) __builtin_sve_svtrn2_f16(__VA_ARGS__)
+#define svtrn2_s32(...) __builtin_sve_svtrn2_s32(__VA_ARGS__)
+#define svtrn2_s64(...) __builtin_sve_svtrn2_s64(__VA_ARGS__)
+#define svtrn2_s16(...) __builtin_sve_svtrn2_s16(__VA_ARGS__)
+#define svtrn2_b8(...) __builtin_sve_svtrn2_b8(__VA_ARGS__)
+#define svtrn2_b32(...) __builtin_sve_svtrn2_b32(__VA_ARGS__)
+#define svtrn2_b64(...) __builtin_sve_svtrn2_b64(__VA_ARGS__)
+#define svtrn2_b16(...) __builtin_sve_svtrn2_b16(__VA_ARGS__)
+#define svtsmul_f64(...) __builtin_sve_svtsmul_f64(__VA_ARGS__)
+#define svtsmul_f32(...) __builtin_sve_svtsmul_f32(__VA_ARGS__)
+#define svtsmul_f16(...) __builtin_sve_svtsmul_f16(__VA_ARGS__)
+#define svtssel_f64(...) __builtin_sve_svtssel_f64(__VA_ARGS__)
+#define svtssel_f32(...) __builtin_sve_svtssel_f32(__VA_ARGS__)
+#define svtssel_f16(...) __builtin_sve_svtssel_f16(__VA_ARGS__)
+#define svundef2_u8(...) __builtin_sve_svundef2_u8(__VA_ARGS__)
+#define svundef2_u32(...) __builtin_sve_svundef2_u32(__VA_ARGS__)
+#define svundef2_u64(...) __builtin_sve_svundef2_u64(__VA_ARGS__)
+#define svundef2_u16(...) __builtin_sve_svundef2_u16(__VA_ARGS__)
+#define svundef2_s8(...) __builtin_sve_svundef2_s8(__VA_ARGS__)
+#define svundef2_f64(...) __builtin_sve_svundef2_f64(__VA_ARGS__)
+#define svundef2_f32(...) __builtin_sve_svundef2_f32(__VA_ARGS__)
+#define svundef2_f16(...) __builtin_sve_svundef2_f16(__VA_ARGS__)
+#define svundef2_s32(...) __builtin_sve_svundef2_s32(__VA_ARGS__)
+#define svundef2_s64(...) __builtin_sve_svundef2_s64(__VA_ARGS__)
+#define svundef2_s16(...) __builtin_sve_svundef2_s16(__VA_ARGS__)
+#define svundef3_u8(...) __builtin_sve_svundef3_u8(__VA_ARGS__)
+#define svundef3_u32(...) __builtin_sve_svundef3_u32(__VA_ARGS__)
+#define svundef3_u64(...) __builtin_sve_svundef3_u64(__VA_ARGS__)
+#define svundef3_u16(...) __builtin_sve_svundef3_u16(__VA_ARGS__)
+#define svundef3_s8(...) __builtin_sve_svundef3_s8(__VA_ARGS__)
+#define svundef3_f64(...) __builtin_sve_svundef3_f64(__VA_ARGS__)
+#define svundef3_f32(...) __builtin_sve_svundef3_f32(__VA_ARGS__)
+#define svundef3_f16(...) __builtin_sve_svundef3_f16(__VA_ARGS__)
+#define svundef3_s32(...) __builtin_sve_svundef3_s32(__VA_ARGS__)
+#define svundef3_s64(...) __builtin_sve_svundef3_s64(__VA_ARGS__)
+#define svundef3_s16(...) __builtin_sve_svundef3_s16(__VA_ARGS__)
+#define svundef4_u8(...) __builtin_sve_svundef4_u8(__VA_ARGS__)
+#define svundef4_u32(...) __builtin_sve_svundef4_u32(__VA_ARGS__)
+#define svundef4_u64(...) __builtin_sve_svundef4_u64(__VA_ARGS__)
+#define svundef4_u16(...) __builtin_sve_svundef4_u16(__VA_ARGS__)
+#define svundef4_s8(...) __builtin_sve_svundef4_s8(__VA_ARGS__)
+#define svundef4_f64(...) __builtin_sve_svundef4_f64(__VA_ARGS__)
+#define svundef4_f32(...) __builtin_sve_svundef4_f32(__VA_ARGS__)
+#define svundef4_f16(...) __builtin_sve_svundef4_f16(__VA_ARGS__)
+#define svundef4_s32(...) __builtin_sve_svundef4_s32(__VA_ARGS__)
+#define svundef4_s64(...) __builtin_sve_svundef4_s64(__VA_ARGS__)
+#define svundef4_s16(...) __builtin_sve_svundef4_s16(__VA_ARGS__)
+#define svundef_u8(...) __builtin_sve_svundef_u8(__VA_ARGS__)
+#define svundef_u32(...) __builtin_sve_svundef_u32(__VA_ARGS__)
+#define svundef_u64(...) __builtin_sve_svundef_u64(__VA_ARGS__)
+#define svundef_u16(...) __builtin_sve_svundef_u16(__VA_ARGS__)
+#define svundef_s8(...) __builtin_sve_svundef_s8(__VA_ARGS__)
+#define svundef_f64(...) __builtin_sve_svundef_f64(__VA_ARGS__)
+#define svundef_f32(...) __builtin_sve_svundef_f32(__VA_ARGS__)
+#define svundef_f16(...) __builtin_sve_svundef_f16(__VA_ARGS__)
+#define svundef_s32(...) __builtin_sve_svundef_s32(__VA_ARGS__)
+#define svundef_s64(...) __builtin_sve_svundef_s64(__VA_ARGS__)
+#define svundef_s16(...) __builtin_sve_svundef_s16(__VA_ARGS__)
+#define svunpkhi_b(...) __builtin_sve_svunpkhi_b(__VA_ARGS__)
+#define svunpkhi_s32(...) __builtin_sve_svunpkhi_s32(__VA_ARGS__)
+#define svunpkhi_s64(...) __builtin_sve_svunpkhi_s64(__VA_ARGS__)
+#define svunpkhi_s16(...) __builtin_sve_svunpkhi_s16(__VA_ARGS__)
+#define svunpkhi_u32(...) __builtin_sve_svunpkhi_u32(__VA_ARGS__)
+#define svunpkhi_u64(...) __builtin_sve_svunpkhi_u64(__VA_ARGS__)
+#define svunpkhi_u16(...) __builtin_sve_svunpkhi_u16(__VA_ARGS__)
+#define svunpklo_b(...) __builtin_sve_svunpklo_b(__VA_ARGS__)
+#define svunpklo_s32(...) __builtin_sve_svunpklo_s32(__VA_ARGS__)
+#define svunpklo_s64(...) __builtin_sve_svunpklo_s64(__VA_ARGS__)
+#define svunpklo_s16(...) __builtin_sve_svunpklo_s16(__VA_ARGS__)
+#define svunpklo_u32(...) __builtin_sve_svunpklo_u32(__VA_ARGS__)
+#define svunpklo_u64(...) __builtin_sve_svunpklo_u64(__VA_ARGS__)
+#define svunpklo_u16(...) __builtin_sve_svunpklo_u16(__VA_ARGS__)
+#define svuzp1_u8(...) __builtin_sve_svuzp1_u8(__VA_ARGS__)
+#define svuzp1_u32(...) __builtin_sve_svuzp1_u32(__VA_ARGS__)
+#define svuzp1_u64(...) __builtin_sve_svuzp1_u64(__VA_ARGS__)
+#define svuzp1_u16(...) __builtin_sve_svuzp1_u16(__VA_ARGS__)
+#define svuzp1_s8(...) __builtin_sve_svuzp1_s8(__VA_ARGS__)
+#define svuzp1_f64(...) __builtin_sve_svuzp1_f64(__VA_ARGS__)
+#define svuzp1_f32(...) __builtin_sve_svuzp1_f32(__VA_ARGS__)
+#define svuzp1_f16(...) __builtin_sve_svuzp1_f16(__VA_ARGS__)
+#define svuzp1_s32(...) __builtin_sve_svuzp1_s32(__VA_ARGS__)
+#define svuzp1_s64(...) __builtin_sve_svuzp1_s64(__VA_ARGS__)
+#define svuzp1_s16(...) __builtin_sve_svuzp1_s16(__VA_ARGS__)
+#define svuzp1_b8(...) __builtin_sve_svuzp1_b8(__VA_ARGS__)
+#define svuzp1_b32(...) __builtin_sve_svuzp1_b32(__VA_ARGS__)
+#define svuzp1_b64(...) __builtin_sve_svuzp1_b64(__VA_ARGS__)
+#define svuzp1_b16(...) __builtin_sve_svuzp1_b16(__VA_ARGS__)
+#define svuzp2_u8(...) __builtin_sve_svuzp2_u8(__VA_ARGS__)
+#define svuzp2_u32(...) __builtin_sve_svuzp2_u32(__VA_ARGS__)
+#define svuzp2_u64(...) __builtin_sve_svuzp2_u64(__VA_ARGS__)
+#define svuzp2_u16(...) __builtin_sve_svuzp2_u16(__VA_ARGS__)
+#define svuzp2_s8(...) __builtin_sve_svuzp2_s8(__VA_ARGS__)
+#define svuzp2_f64(...) __builtin_sve_svuzp2_f64(__VA_ARGS__)
+#define svuzp2_f32(...) __builtin_sve_svuzp2_f32(__VA_ARGS__)
+#define svuzp2_f16(...) __builtin_sve_svuzp2_f16(__VA_ARGS__)
+#define svuzp2_s32(...) __builtin_sve_svuzp2_s32(__VA_ARGS__)
+#define svuzp2_s64(...) __builtin_sve_svuzp2_s64(__VA_ARGS__)
+#define svuzp2_s16(...) __builtin_sve_svuzp2_s16(__VA_ARGS__)
+#define svuzp2_b8(...) __builtin_sve_svuzp2_b8(__VA_ARGS__)
+#define svuzp2_b32(...) __builtin_sve_svuzp2_b32(__VA_ARGS__)
+#define svuzp2_b64(...) __builtin_sve_svuzp2_b64(__VA_ARGS__)
+#define svuzp2_b16(...) __builtin_sve_svuzp2_b16(__VA_ARGS__)
+#define svwhilele_b8_s32(...) __builtin_sve_svwhilele_b8_s32(__VA_ARGS__)
+#define svwhilele_b32_s32(...) __builtin_sve_svwhilele_b32_s32(__VA_ARGS__)
+#define svwhilele_b64_s32(...) __builtin_sve_svwhilele_b64_s32(__VA_ARGS__)
+#define svwhilele_b16_s32(...) __builtin_sve_svwhilele_b16_s32(__VA_ARGS__)
+#define svwhilele_b8_s64(...) __builtin_sve_svwhilele_b8_s64(__VA_ARGS__)
+#define svwhilele_b32_s64(...) __builtin_sve_svwhilele_b32_s64(__VA_ARGS__)
+#define svwhilele_b64_s64(...) __builtin_sve_svwhilele_b64_s64(__VA_ARGS__)
+#define svwhilele_b16_s64(...) __builtin_sve_svwhilele_b16_s64(__VA_ARGS__)
+#define svwhilele_b8_u32(...) __builtin_sve_svwhilele_b8_u32(__VA_ARGS__)
+#define svwhilele_b32_u32(...) __builtin_sve_svwhilele_b32_u32(__VA_ARGS__)
+#define svwhilele_b64_u32(...) __builtin_sve_svwhilele_b64_u32(__VA_ARGS__)
+#define svwhilele_b16_u32(...) __builtin_sve_svwhilele_b16_u32(__VA_ARGS__)
+#define svwhilele_b8_u64(...) __builtin_sve_svwhilele_b8_u64(__VA_ARGS__)
+#define svwhilele_b32_u64(...) __builtin_sve_svwhilele_b32_u64(__VA_ARGS__)
+#define svwhilele_b64_u64(...) __builtin_sve_svwhilele_b64_u64(__VA_ARGS__)
+#define svwhilele_b16_u64(...) __builtin_sve_svwhilele_b16_u64(__VA_ARGS__)
+#define svwhilelt_b8_u32(...) __builtin_sve_svwhilelt_b8_u32(__VA_ARGS__)
+#define svwhilelt_b32_u32(...) __builtin_sve_svwhilelt_b32_u32(__VA_ARGS__)
+#define svwhilelt_b64_u32(...) __builtin_sve_svwhilelt_b64_u32(__VA_ARGS__)
+#define svwhilelt_b16_u32(...) __builtin_sve_svwhilelt_b16_u32(__VA_ARGS__)
+#define svwhilelt_b8_u64(...) __builtin_sve_svwhilelt_b8_u64(__VA_ARGS__)
+#define svwhilelt_b32_u64(...) __builtin_sve_svwhilelt_b32_u64(__VA_ARGS__)
+#define svwhilelt_b64_u64(...) __builtin_sve_svwhilelt_b64_u64(__VA_ARGS__)
+#define svwhilelt_b16_u64(...) __builtin_sve_svwhilelt_b16_u64(__VA_ARGS__)
+#define svwhilelt_b8_s32(...) __builtin_sve_svwhilelt_b8_s32(__VA_ARGS__)
+#define svwhilelt_b32_s32(...) __builtin_sve_svwhilelt_b32_s32(__VA_ARGS__)
+#define svwhilelt_b64_s32(...) __builtin_sve_svwhilelt_b64_s32(__VA_ARGS__)
+#define svwhilelt_b16_s32(...) __builtin_sve_svwhilelt_b16_s32(__VA_ARGS__)
+#define svwhilelt_b8_s64(...) __builtin_sve_svwhilelt_b8_s64(__VA_ARGS__)
+#define svwhilelt_b32_s64(...) __builtin_sve_svwhilelt_b32_s64(__VA_ARGS__)
+#define svwhilelt_b64_s64(...) __builtin_sve_svwhilelt_b64_s64(__VA_ARGS__)
+#define svwhilelt_b16_s64(...) __builtin_sve_svwhilelt_b16_s64(__VA_ARGS__)
+#define svwrffr(...) __builtin_sve_svwrffr(__VA_ARGS__)
+#define svzip1_u8(...) __builtin_sve_svzip1_u8(__VA_ARGS__)
+#define svzip1_u32(...) __builtin_sve_svzip1_u32(__VA_ARGS__)
+#define svzip1_u64(...) __builtin_sve_svzip1_u64(__VA_ARGS__)
+#define svzip1_u16(...) __builtin_sve_svzip1_u16(__VA_ARGS__)
+#define svzip1_s8(...) __builtin_sve_svzip1_s8(__VA_ARGS__)
+#define svzip1_f64(...) __builtin_sve_svzip1_f64(__VA_ARGS__)
+#define svzip1_f32(...) __builtin_sve_svzip1_f32(__VA_ARGS__)
+#define svzip1_f16(...) __builtin_sve_svzip1_f16(__VA_ARGS__)
+#define svzip1_s32(...) __builtin_sve_svzip1_s32(__VA_ARGS__)
+#define svzip1_s64(...) __builtin_sve_svzip1_s64(__VA_ARGS__)
+#define svzip1_s16(...) __builtin_sve_svzip1_s16(__VA_ARGS__)
+#define svzip1_b8(...) __builtin_sve_svzip1_b8(__VA_ARGS__)
+#define svzip1_b32(...) __builtin_sve_svzip1_b32(__VA_ARGS__)
+#define svzip1_b64(...) __builtin_sve_svzip1_b64(__VA_ARGS__)
+#define svzip1_b16(...) __builtin_sve_svzip1_b16(__VA_ARGS__)
+#define svzip2_u8(...) __builtin_sve_svzip2_u8(__VA_ARGS__)
+#define svzip2_u32(...) __builtin_sve_svzip2_u32(__VA_ARGS__)
+#define svzip2_u64(...) __builtin_sve_svzip2_u64(__VA_ARGS__)
+#define svzip2_u16(...) __builtin_sve_svzip2_u16(__VA_ARGS__)
+#define svzip2_s8(...) __builtin_sve_svzip2_s8(__VA_ARGS__)
+#define svzip2_f64(...) __builtin_sve_svzip2_f64(__VA_ARGS__)
+#define svzip2_f32(...) __builtin_sve_svzip2_f32(__VA_ARGS__)
+#define svzip2_f16(...) __builtin_sve_svzip2_f16(__VA_ARGS__)
+#define svzip2_s32(...) __builtin_sve_svzip2_s32(__VA_ARGS__)
+#define svzip2_s64(...) __builtin_sve_svzip2_s64(__VA_ARGS__)
+#define svzip2_s16(...) __builtin_sve_svzip2_s16(__VA_ARGS__)
+#define svzip2_b8(...) __builtin_sve_svzip2_b8(__VA_ARGS__)
+#define svzip2_b32(...) __builtin_sve_svzip2_b32(__VA_ARGS__)
+#define svzip2_b64(...) __builtin_sve_svzip2_b64(__VA_ARGS__)
+#define svzip2_b16(...) __builtin_sve_svzip2_b16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))
+svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))
+svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m)))
+svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x)))
+svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x)))
+svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x)))
+svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z)))
+svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z)))
+svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z)))
+svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m)))
+svint8_t svabd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m)))
+svint32_t svabd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m)))
+svint64_t svabd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m)))
+svint16_t svabd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x)))
+svint8_t svabd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x)))
+svint32_t svabd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x)))
+svint64_t svabd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x)))
+svint16_t svabd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z)))
+svint8_t svabd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z)))
+svint32_t svabd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z)))
+svint64_t svabd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z)))
+svint16_t svabd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m)))
+svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m)))
+svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m)))
+svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m)))
+svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x)))
+svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x)))
+svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x)))
+svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x)))
+svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z)))
+svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z)))
+svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z)))
+svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z)))
+svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m)))
+svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m)))
+svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m)))
+svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x)))
+svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x)))
+svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x)))
+svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z)))
+svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z)))
+svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z)))
+svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m)))
+svint8_t svabd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m)))
+svint32_t svabd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m)))
+svint64_t svabd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m)))
+svint16_t svabd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x)))
+svint8_t svabd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x)))
+svint32_t svabd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x)))
+svint64_t svabd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x)))
+svint16_t svabd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z)))
+svint8_t svabd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z)))
+svint32_t svabd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z)))
+svint64_t svabd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z)))
+svint16_t svabd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m)))
+svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m)))
+svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m)))
+svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m)))
+svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x)))
+svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x)))
+svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x)))
+svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x)))
+svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z)))
+svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z)))
+svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z)))
+svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z)))
+svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m)))
+svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m)))
+svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m)))
+svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x)))
+svfloat64_t svabs_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x)))
+svfloat32_t svabs_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x)))
+svfloat16_t svabs_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z)))
+svfloat64_t svabs_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z)))
+svfloat32_t svabs_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z)))
+svfloat16_t svabs_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m)))
+svint8_t svabs_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m)))
+svint32_t svabs_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m)))
+svint64_t svabs_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m)))
+svint16_t svabs_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x)))
+svint8_t svabs_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x)))
+svint32_t svabs_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x)))
+svint64_t svabs_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x)))
+svint16_t svabs_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z)))
+svint8_t svabs_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z)))
+svint32_t svabs_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z)))
+svint64_t svabs_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z)))
+svint16_t svabs_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64)))
+svbool_t svacge(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32)))
+svbool_t svacge(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16)))
+svbool_t svacge(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64)))
+svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32)))
+svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16)))
+svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64)))
+svbool_t svacgt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32)))
+svbool_t svacgt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16)))
+svbool_t svacgt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64)))
+svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32)))
+svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16)))
+svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64)))
+svbool_t svacle(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32)))
+svbool_t svacle(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16)))
+svbool_t svacle(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64)))
+svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32)))
+svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16)))
+svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64)))
+svbool_t svaclt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32)))
+svbool_t svaclt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16)))
+svbool_t svaclt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64)))
+svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32)))
+svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16)))
+svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m)))
+svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m)))
+svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m)))
+svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x)))
+svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x)))
+svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x)))
+svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z)))
+svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z)))
+svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z)))
+svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m)))
+svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m)))
+svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m)))
+svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m)))
+svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m)))
+svint8_t svadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m)))
+svint32_t svadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m)))
+svint64_t svadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m)))
+svint16_t svadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x)))
+svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x)))
+svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x)))
+svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x)))
+svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x)))
+svint8_t svadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x)))
+svint32_t svadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x)))
+svint64_t svadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x)))
+svint16_t svadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z)))
+svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z)))
+svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z)))
+svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z)))
+svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z)))
+svint8_t svadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z)))
+svint32_t svadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z)))
+svint64_t svadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z)))
+svint16_t svadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m)))
+svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m)))
+svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m)))
+svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x)))
+svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x)))
+svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x)))
+svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z)))
+svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z)))
+svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z)))
+svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m)))
+svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m)))
+svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m)))
+svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m)))
+svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m)))
+svint8_t svadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m)))
+svint32_t svadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m)))
+svint64_t svadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m)))
+svint16_t svadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x)))
+svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x)))
+svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x)))
+svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x)))
+svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x)))
+svint8_t svadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x)))
+svint32_t svadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x)))
+svint64_t svadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x)))
+svint16_t svadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z)))
+svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z)))
+svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z)))
+svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z)))
+svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z)))
+svint8_t svadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z)))
+svint32_t svadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z)))
+svint64_t svadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z)))
+svint16_t svadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64)))
+float64_t svadda(svbool_t, float64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32)))
+float32_t svadda(svbool_t, float32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16)))
+float16_t svadda(svbool_t, float16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8)))
+int64_t svaddv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32)))
+int64_t svaddv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64)))
+int64_t svaddv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16)))
+int64_t svaddv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8)))
+uint64_t svaddv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32)))
+uint64_t svaddv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64)))
+uint64_t svaddv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16)))
+uint64_t svaddv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64)))
+float64_t svaddv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))
+float32_t svaddv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))
+float16_t svaddv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
+svuint32_t svadrb_offset(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
+svuint64_t svadrb_offset(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
+svuint32_t svadrb_offset(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
+svuint64_t svadrb_offset(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
+svuint32_t svadrd_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
+svuint64_t svadrd_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
+svuint32_t svadrd_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
+svuint64_t svadrd_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
+svuint32_t svadrh_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
+svuint64_t svadrh_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
+svuint32_t svadrh_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
+svuint64_t svadrh_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
+svuint32_t svadrw_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
+svuint64_t svadrw_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
+svuint32_t svadrw_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
+svuint64_t svadrw_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))
+svbool_t svand_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))
+svuint8_t svand_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m)))
+svuint32_t svand_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m)))
+svuint64_t svand_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m)))
+svuint16_t svand_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m)))
+svint8_t svand_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m)))
+svint32_t svand_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m)))
+svint64_t svand_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m)))
+svint16_t svand_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x)))
+svuint8_t svand_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x)))
+svuint32_t svand_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x)))
+svuint64_t svand_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x)))
+svuint16_t svand_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x)))
+svint8_t svand_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x)))
+svint32_t svand_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x)))
+svint64_t svand_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x)))
+svint16_t svand_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z)))
+svuint8_t svand_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z)))
+svuint32_t svand_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z)))
+svuint64_t svand_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z)))
+svuint16_t svand_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z)))
+svint8_t svand_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z)))
+svint32_t svand_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z)))
+svint64_t svand_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z)))
+svint16_t svand_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m)))
+svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m)))
+svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m)))
+svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m)))
+svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m)))
+svint8_t svand_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m)))
+svint32_t svand_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m)))
+svint64_t svand_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m)))
+svint16_t svand_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x)))
+svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x)))
+svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x)))
+svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x)))
+svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x)))
+svint8_t svand_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x)))
+svint32_t svand_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x)))
+svint64_t svand_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x)))
+svint16_t svand_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z)))
+svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z)))
+svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z)))
+svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z)))
+svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z)))
+svint8_t svand_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z)))
+svint32_t svand_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z)))
+svint64_t svand_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z)))
+svint16_t svand_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8)))
+uint8_t svandv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32)))
+uint32_t svandv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64)))
+uint64_t svandv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16)))
+uint16_t svandv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8)))
+int8_t svandv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32)))
+int32_t svandv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64)))
+int64_t svandv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16)))
+int16_t svandv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m)))
+svint8_t svasr_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m)))
+svint32_t svasr_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m)))
+svint64_t svasr_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m)))
+svint16_t svasr_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x)))
+svint8_t svasr_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x)))
+svint32_t svasr_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x)))
+svint64_t svasr_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x)))
+svint16_t svasr_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z)))
+svint8_t svasr_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z)))
+svint32_t svasr_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z)))
+svint64_t svasr_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z)))
+svint16_t svasr_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m)))
+svint8_t svasr_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m)))
+svint32_t svasr_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m)))
+svint64_t svasr_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m)))
+svint16_t svasr_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x)))
+svint8_t svasr_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x)))
+svint32_t svasr_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x)))
+svint64_t svasr_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x)))
+svint16_t svasr_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z)))
+svint8_t svasr_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z)))
+svint32_t svasr_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z)))
+svint64_t svasr_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z)))
+svint16_t svasr_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m)))
+svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m)))
+svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m)))
+svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x)))
+svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x)))
+svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x)))
+svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z)))
+svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z)))
+svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z)))
+svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m)))
+svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m)))
+svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m)))
+svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x)))
+svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x)))
+svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x)))
+svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z)))
+svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z)))
+svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z)))
+svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m)))
+svint8_t svasrd_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m)))
+svint32_t svasrd_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m)))
+svint64_t svasrd_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m)))
+svint16_t svasrd_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x)))
+svint8_t svasrd_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x)))
+svint32_t svasrd_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x)))
+svint64_t svasrd_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x)))
+svint16_t svasrd_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z)))
+svint8_t svasrd_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z)))
+svint32_t svasrd_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z)))
+svint64_t svasrd_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z)))
+svint16_t svasrd_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z)))
+svbool_t svbic_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m)))
+svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m)))
+svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m)))
+svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m)))
+svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m)))
+svint8_t svbic_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m)))
+svint32_t svbic_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m)))
+svint64_t svbic_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m)))
+svint16_t svbic_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x)))
+svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x)))
+svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x)))
+svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x)))
+svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x)))
+svint8_t svbic_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x)))
+svint32_t svbic_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x)))
+svint64_t svbic_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x)))
+svint16_t svbic_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z)))
+svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z)))
+svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z)))
+svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z)))
+svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z)))
+svint8_t svbic_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z)))
+svint32_t svbic_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z)))
+svint64_t svbic_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z)))
+svint16_t svbic_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m)))
+svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m)))
+svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m)))
+svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m)))
+svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m)))
+svint8_t svbic_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m)))
+svint32_t svbic_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m)))
+svint64_t svbic_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m)))
+svint16_t svbic_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x)))
+svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x)))
+svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x)))
+svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x)))
+svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x)))
+svint8_t svbic_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x)))
+svint32_t svbic_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x)))
+svint64_t svbic_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x)))
+svint16_t svbic_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z)))
+svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z)))
+svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z)))
+svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z)))
+svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z)))
+svint8_t svbic_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z)))
+svint32_t svbic_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z)))
+svint64_t svbic_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z)))
+svint16_t svbic_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m)))
+svbool_t svbrka_m(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z)))
+svbool_t svbrka_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m)))
+svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z)))
+svbool_t svbrkb_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z)))
+svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z)))
+svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z)))
+svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m)))
+svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m)))
+svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m)))
+svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x)))
+svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x)))
+svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x)))
+svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z)))
+svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z)))
+svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z)))
+svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8)))
+uint8_t svclasta(svbool_t, uint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32)))
+uint32_t svclasta(svbool_t, uint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64)))
+uint64_t svclasta(svbool_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16)))
+uint16_t svclasta(svbool_t, uint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8)))
+int8_t svclasta(svbool_t, int8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64)))
+float64_t svclasta(svbool_t, float64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32)))
+float32_t svclasta(svbool_t, float32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16)))
+float16_t svclasta(svbool_t, float16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32)))
+int32_t svclasta(svbool_t, int32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64)))
+int64_t svclasta(svbool_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16)))
+int16_t svclasta(svbool_t, int16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8)))
+svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32)))
+svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64)))
+svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16)))
+svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8)))
+svint8_t svclasta(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64)))
+svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32)))
+svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16)))
+svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32)))
+svint32_t svclasta(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64)))
+svint64_t svclasta(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16)))
+svint16_t svclasta(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8)))
+uint8_t svclastb(svbool_t, uint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32)))
+uint32_t svclastb(svbool_t, uint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64)))
+uint64_t svclastb(svbool_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16)))
+uint16_t svclastb(svbool_t, uint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8)))
+int8_t svclastb(svbool_t, int8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64)))
+float64_t svclastb(svbool_t, float64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32)))
+float32_t svclastb(svbool_t, float32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))
+float16_t svclastb(svbool_t, float16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))
+int32_t svclastb(svbool_t, int32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))
+int64_t svclastb(svbool_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))
+int16_t svclastb(svbool_t, int16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))
+svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))
+svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))
+svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))
+svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))
+svint8_t svclastb(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))
+svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))
+svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))
+svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))
+svint32_t svclastb(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))
+svint64_t svclastb(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))
+svint16_t svclastb(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))
+svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))
+svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))
+svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))
+svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))
+svuint8_t svcls_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))
+svuint32_t svcls_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))
+svuint64_t svcls_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))
+svuint16_t svcls_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))
+svuint8_t svcls_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))
+svuint32_t svcls_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))
+svuint64_t svcls_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))
+svuint16_t svcls_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))
+svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))
+svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))
+svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))
+svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))
+svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))
+svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))
+svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))
+svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))
+svuint8_t svclz_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))
+svuint32_t svclz_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))
+svuint64_t svclz_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))
+svuint16_t svclz_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))
+svuint8_t svclz_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))
+svuint32_t svclz_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))
+svuint64_t svclz_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))
+svuint16_t svclz_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))
+svuint8_t svclz_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))
+svuint32_t svclz_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))
+svuint64_t svclz_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))
+svuint16_t svclz_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))
+svuint8_t svclz_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))
+svuint32_t svclz_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))
+svuint64_t svclz_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))
+svuint16_t svclz_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))
+svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))
+svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))
+svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))
+svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))
+svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))
+svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))
+svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))
+svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))
+svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))
+svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))
+svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))
+svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))
+svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))
+svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))
+svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))
+svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))
+svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))
+svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))
+svbool_t svcmpeq(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))
+svbool_t svcmpeq(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))
+svbool_t svcmpeq(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))
+svbool_t svcmpeq(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))
+svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))
+svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))
+svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))
+svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))
+svbool_t svcmpeq(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))
+svbool_t svcmpeq(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))
+svbool_t svcmpeq(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))
+svbool_t svcmpeq(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))
+svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))
+svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))
+svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))
+svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))
+svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))
+svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))
+svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))
+svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))
+svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))
+svbool_t svcmpge(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))
+svbool_t svcmpge(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))
+svbool_t svcmpge(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))
+svbool_t svcmpge(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))
+svbool_t svcmpge(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))
+svbool_t svcmpge(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))
+svbool_t svcmpge(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))
+svbool_t svcmpge(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))
+svbool_t svcmpge(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))
+svbool_t svcmpge(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))
+svbool_t svcmpge(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))
+svbool_t svcmpge(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))
+svbool_t svcmpge(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))
+svbool_t svcmpge(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))
+svbool_t svcmpge(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))
+svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))
+svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))
+svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))
+svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))
+svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))
+svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))
+svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))
+svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))
+svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))
+svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))
+svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))
+svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))
+svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))
+svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))
+svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))
+svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))
+svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))
+svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))
+svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))
+svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))
+svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))
+svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))
+svbool_t svcmpgt(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))
+svbool_t svcmpgt(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))
+svbool_t svcmpgt(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))
+svbool_t svcmpgt(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))
+svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))
+svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))
+svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))
+svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))
+svbool_t svcmpgt(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))
+svbool_t svcmpgt(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))
+svbool_t svcmpgt(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))
+svbool_t svcmpgt(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))
+svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))
+svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))
+svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))
+svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))
+svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))
+svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))
+svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))
+svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))
+svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))
+svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))
+svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))
+svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))
+svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))
+svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))
+svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))
+svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))
+svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))
+svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))
+svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))
+svbool_t svcmple(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))
+svbool_t svcmple(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))
+svbool_t svcmple(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))
+svbool_t svcmple(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))
+svbool_t svcmple(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))
+svbool_t svcmple(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))
+svbool_t svcmple(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))
+svbool_t svcmple(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))
+svbool_t svcmple(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))
+svbool_t svcmple(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))
+svbool_t svcmple(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))
+svbool_t svcmple(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))
+svbool_t svcmple(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))
+svbool_t svcmple(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))
+svbool_t svcmple(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))
+svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))
+svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))
+svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))
+svbool_t svcmple(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))
+svbool_t svcmple(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))
+svbool_t svcmple(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))
+svbool_t svcmple(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))
+svbool_t svcmple_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))
+svbool_t svcmple_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))
+svbool_t svcmple_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))
+svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))
+svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))
+svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))
+svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))
+svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))
+svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))
+svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))
+svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))
+svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))
+svbool_t svcmplt(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))
+svbool_t svcmplt(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))
+svbool_t svcmplt(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))
+svbool_t svcmplt(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))
+svbool_t svcmplt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))
+svbool_t svcmplt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))
+svbool_t svcmplt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))
+svbool_t svcmplt(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))
+svbool_t svcmplt(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))
+svbool_t svcmplt(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))
+svbool_t svcmplt(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))
+svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))
+svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))
+svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))
+svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))
+svbool_t svcmplt(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))
+svbool_t svcmplt(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))
+svbool_t svcmplt(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))
+svbool_t svcmplt(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))
+svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))
+svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))
+svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))
+svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))
+svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))
+svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))
+svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))
+svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))
+svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))
+svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))
+svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))
+svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))
+svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))
+svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))
+svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))
+svbool_t svcmpne(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))
+svbool_t svcmpne(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))
+svbool_t svcmpne(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))
+svbool_t svcmpne(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))
+svbool_t svcmpne(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))
+svbool_t svcmpne(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))
+svbool_t svcmpne(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))
+svbool_t svcmpne(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))
+svbool_t svcmpne(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))
+svbool_t svcmpne(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))
+svbool_t svcmpne(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))
+svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))
+svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))
+svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))
+svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))
+svbool_t svcmpne(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))
+svbool_t svcmpne(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))
+svbool_t svcmpne(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))
+svbool_t svcmpne(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))
+svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))
+svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))
+svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))
+svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))
+svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))
+svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))
+svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))
+svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))
+svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))
+svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))
+svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))
+svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))
+svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))
+svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))
+svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))
+svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))
+svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))
+svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))
+svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))
+svint8_t svcnot_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))
+svint32_t svcnot_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))
+svint64_t svcnot_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))
+svint16_t svcnot_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))
+svuint8_t svcnot_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))
+svuint32_t svcnot_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))
+svuint64_t svcnot_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))
+svuint16_t svcnot_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))
+svint8_t svcnot_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))
+svint32_t svcnot_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))
+svint64_t svcnot_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))
+svint16_t svcnot_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))
+svuint8_t svcnot_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))
+svuint32_t svcnot_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))
+svuint64_t svcnot_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))
+svuint16_t svcnot_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))
+svint8_t svcnot_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))
+svint32_t svcnot_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))
+svint64_t svcnot_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))
+svint16_t svcnot_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))
+svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))
+svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))
+svuint8_t svcnt_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))
+svuint32_t svcnt_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))
+svuint64_t svcnt_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))
+svuint16_t svcnt_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))
+svuint8_t svcnt_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))
+svuint64_t svcnt_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))
+svuint32_t svcnt_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))
+svuint16_t svcnt_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))
+svuint32_t svcnt_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))
+svuint64_t svcnt_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))
+svuint16_t svcnt_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))
+svuint8_t svcnt_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))
+svuint32_t svcnt_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))
+svuint64_t svcnt_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))
+svuint16_t svcnt_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))
+svuint8_t svcnt_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))
+svuint64_t svcnt_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))
+svuint32_t svcnt_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))
+svuint16_t svcnt_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))
+svuint32_t svcnt_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
+svuint64_t svcnt_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
+svuint16_t svcnt_z(svbool_t, svint16_t);
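+// Editorial note (not part of the generated header): svcnot_* is a logical
+// NOT (all-ones where the input lane is zero), while svcnt_* is the
+// per-element popcount. A minimal usage sketch, assuming the svptrue_b8()
+// and svdup_u8() helpers declared elsewhere in this header:
+//   svuint8_t bits = svcnt_x(svptrue_b8(), svdup_u8(0xF0)); // every lane = 4
+// Throughout, the _m/_x/_z suffixes select merging, don't-care, and zeroing
+// predication for the inactive lanes.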
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
+svuint32_t svcompact(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
+svuint64_t svcompact(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
+svfloat64_t svcompact(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
+svfloat32_t svcompact(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
+svint32_t svcompact(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
+svint64_t svcompact(svbool_t, svint64_t);
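+// Editorial note (not part of the generated header): svcompact gathers the
+// active elements of its input into the lowest-numbered lanes of the result.
+// Sketch, assuming pg is an existing predicate:
+//   svfloat32_t packed = svcompact(pg, v); // active lanes of v, packed low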
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
+svuint8x2_t svcreate2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
+svuint32x2_t svcreate2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))
+svuint64x2_t svcreate2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))
+svuint16x2_t svcreate2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))
+svint8x2_t svcreate2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))
+svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))
+svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))
+svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))
+svint32x2_t svcreate2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))
+svint64x2_t svcreate2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))
+svint16x2_t svcreate2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))
+svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))
+svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))
+svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))
+svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))
+svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))
+svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))
+svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))
+svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))
+svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))
+svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))
+svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))
+svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))
+svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))
+svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))
+svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))
+svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))
+svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))
+svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))
+svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))
+svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))
+svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))
+svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t);
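+// Editorial note (not part of the generated header): svcreate2/3/4 bundle
+// individual vectors into the svXXxN_t tuple types used by the multi-vector
+// load/store intrinsics; svget2/3/4 (declared further down) read one vector
+// back out of a tuple:
+//   svfloat32x2_t pair  = svcreate2(a, b);
+//   svfloat32_t   first = svget2(pair, 0); // == a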
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))
+svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))
+svint16_t svcvt_s16_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))
+svint16_t svcvt_s16_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))
+svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))
+svuint16_t svcvt_u16_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))
+svuint16_t svcvt_u16_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat64_t);
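+// Editorial note (not part of the generated header): every svcvt overload
+// above follows the same predication scheme. Sketch converting f32 -> s32,
+// assuming pg, f, and acc are existing values:
+//   svint32_t i = svcvt_s32_z(pg, f);      // inactive lanes become zero
+//   svint32_t j = svcvt_s32_m(acc, pg, f); // inactive lanes taken from acc
+//   svint32_t k = svcvt_s32_x(pg, f);      // inactive lanes unspecified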
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))
+svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))
+svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))
+svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))
+svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))
+svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))
+svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))
+svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))
+svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))
+svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))
+svint32_t svdiv_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))
+svint64_t svdiv_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))
+svint32_t svdiv_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))
+svint64_t svdiv_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))
+svint32_t svdiv_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))
+svint64_t svdiv_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))
+svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))
+svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))
+svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))
+svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))
+svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))
+svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))
+svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))
+svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))
+svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))
+svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))
+svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))
+svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))
+svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))
+svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))
+svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))
+svint32_t svdiv_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))
+svint64_t svdiv_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))
+svint32_t svdiv_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))
+svint64_t svdiv_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))
+svint32_t svdiv_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))
+svint64_t svdiv_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))
+svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))
+svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))
+svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))
+svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))
+svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))
+svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))
+svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))
+svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))
+svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))
+svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))
+svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))
+svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))
+svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))
+svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))
+svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))
+svint32_t svdivr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))
+svint64_t svdivr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))
+svint32_t svdivr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))
+svint64_t svdivr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))
+svint32_t svdivr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))
+svint64_t svdivr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))
+svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))
+svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))
+svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))
+svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))
+svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))
+svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))
+svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))
+svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))
+svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))
+svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))
+svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))
+svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))
+svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))
+svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))
+svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))
+svint32_t svdivr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))
+svint64_t svdivr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))
+svint32_t svdivr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))
+svint64_t svdivr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))
+svint32_t svdivr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))
+svint64_t svdivr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))
+svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))
+svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))
+svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))
+svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))
+svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))
+svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t);
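+// Editorial note (not part of the generated header): svdivr is division with
+// the operands reversed (second operand divided by first), which pairs
+// naturally with the _n scalar forms:
+//   svfloat32_t r = svdivr_x(pg, v, 1.0f); // each active lane = 1.0f / lane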
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))
+svint32_t svdot(svint32_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))
+svint64_t svdot(svint64_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))
+svuint32_t svdot(svuint32_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))
+svuint64_t svdot(svuint64_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))
+svint32_t svdot(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))
+svint64_t svdot(svint64_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))
+svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))
+svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))
+svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))
+svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))
+svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))
+svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t);
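+// Editorial note (not part of the generated header): svdot is the widening
+// dot product: each 32-bit accumulator lane gains the dot product of the
+// four adjacent 8-bit lanes beneath it (two 16-bit lanes for the 64-bit
+// forms). Sketch:
+//   svint32_t acc2 = svdot(acc, a, b);
+//   // acc2[i] = acc[i] + sum over j of a[4*i+j] * b[4*i+j], j = 0..3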
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))
+svuint8_t svdup_u8(uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))
+svuint32_t svdup_u32(uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))
+svuint64_t svdup_u64(uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))
+svuint16_t svdup_u16(uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))
+svint8_t svdup_s8(int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))
+svfloat64_t svdup_f64(float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))
+svfloat32_t svdup_f32(float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))
+svfloat16_t svdup_f16(float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))
+svint32_t svdup_s32(int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))
+svint64_t svdup_s64(int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))
+svint16_t svdup_s16(int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))
+svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))
+svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))
+svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))
+svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))
+svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))
+svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))
+svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))
+svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))
+svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))
+svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))
+svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))
+svbool_t svdup_b8(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))
+svbool_t svdup_b32(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))
+svbool_t svdup_b64(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))
+svbool_t svdup_b16(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))
+svuint8_t svdup_u8_x(svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))
+svuint32_t svdup_u32_x(svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))
+svuint64_t svdup_u64_x(svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))
+svuint16_t svdup_u16_x(svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))
+svint8_t svdup_s8_x(svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))
+svfloat64_t svdup_f64_x(svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))
+svfloat32_t svdup_f32_x(svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))
+svfloat16_t svdup_f16_x(svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))
+svint32_t svdup_s32_x(svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))
+svint64_t svdup_s64_x(svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))
+svint16_t svdup_s16_x(svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))
+svuint8_t svdup_u8_z(svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))
+svuint32_t svdup_u32_z(svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))
+svuint64_t svdup_u64_z(svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))
+svuint16_t svdup_u16_z(svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))
+svint8_t svdup_s8_z(svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))
+svfloat64_t svdup_f64_z(svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))
+svfloat32_t svdup_f32_z(svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))
+svfloat16_t svdup_f16_z(svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))
+svint32_t svdup_s32_z(svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))
+svint64_t svdup_s64_z(svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))
+svint16_t svdup_s16_z(svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))
+svuint8_t svdup_lane(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))
+svuint32_t svdup_lane(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))
+svuint64_t svdup_lane(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))
+svuint16_t svdup_lane(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))
+svint8_t svdup_lane(svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))
+svfloat64_t svdup_lane(svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))
+svfloat32_t svdup_lane(svfloat32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))
+svfloat16_t svdup_lane(svfloat16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))
+svint32_t svdup_lane(svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
+svint64_t svdup_lane(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
+svint16_t svdup_lane(svint16_t, uint16_t);
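+// Editorial note (not part of the generated header): svdup_<type> broadcasts
+// a scalar to every lane; the _m/_x/_z forms broadcast only into the active
+// lanes, and svdup_lane broadcasts one existing lane across the vector:
+//   svfloat32_t ones = svdup_f32(1.0f);
+//   svfloat32_t mix  = svdup_f32_m(ones, pg, 2.0f); // 2.0 where pg, else 1.0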
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
+svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
+svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))
+svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32)))
+svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32)))
+svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32)))
+svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64)))
+svuint64_t svdupq_u64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))
+svfloat64_t svdupq_f64(float64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))
+svint64_t svdupq_s64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
+svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
+svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))
+svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))
+svbool_t svdupq_b32(bool, bool, bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))
+svbool_t svdupq_b64(bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
+svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))
+svuint8_t svdupq_lane(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))
+svuint32_t svdupq_lane(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64)))
+svuint64_t svdupq_lane(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16)))
+svuint16_t svdupq_lane(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8)))
+svint8_t svdupq_lane(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64)))
+svfloat64_t svdupq_lane(svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32)))
+svfloat32_t svdupq_lane(svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16)))
+svfloat16_t svdupq_lane(svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32)))
+svint32_t svdupq_lane(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64)))
+svint64_t svdupq_lane(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16)))
+svint16_t svdupq_lane(svint16_t, uint64_t);
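+// Editorial note (not part of the generated header): svdupq_* assembles one
+// 128-bit quadword from its arguments and replicates it across the whole
+// (scalable) vector:
+//   svuint32_t pat = svdupq_u32(0, 1, 2, 3); // {0,1,2,3, 0,1,2,3, ...}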
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z)))
+svbool_t sveor_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m)))
+svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m)))
+svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m)))
+svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m)))
+svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m)))
+svint8_t sveor_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m)))
+svint32_t sveor_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m)))
+svint64_t sveor_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m)))
+svint16_t sveor_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x)))
+svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x)))
+svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x)))
+svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x)))
+svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x)))
+svint8_t sveor_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x)))
+svint32_t sveor_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x)))
+svint64_t sveor_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x)))
+svint16_t sveor_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z)))
+svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z)))
+svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z)))
+svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z)))
+svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z)))
+svint8_t sveor_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z)))
+svint32_t sveor_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z)))
+svint64_t sveor_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z)))
+svint16_t sveor_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m)))
+svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m)))
+svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m)))
+svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m)))
+svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m)))
+svint8_t sveor_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m)))
+svint32_t sveor_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m)))
+svint64_t sveor_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m)))
+svint16_t sveor_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x)))
+svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x)))
+svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x)))
+svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x)))
+svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x)))
+svint8_t sveor_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x)))
+svint32_t sveor_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x)))
+svint64_t sveor_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x)))
+svint16_t sveor_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z)))
+svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z)))
+svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z)))
+svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z)))
+svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z)))
+svint8_t sveor_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z)))
+svint32_t sveor_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z)))
+svint64_t sveor_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z)))
+svint16_t sveor_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8)))
+uint8_t sveorv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32)))
+uint32_t sveorv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64)))
+uint64_t sveorv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16)))
+uint16_t sveorv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8)))
+int8_t sveorv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32)))
+int32_t sveorv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))
+int64_t sveorv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))
+int16_t sveorv(svbool_t, svint16_t);
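+// Editorial note (not part of the generated header): sveor_* is lane-wise
+// XOR, while sveorv reduces the active lanes of a single vector to one
+// scalar:
+//   uint32_t parity = sveorv(pg, v); // XOR of all active 32-bit lanes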
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
+svfloat64_t svexpa(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
+svfloat32_t svexpa(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
+svfloat16_t svexpa(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))
+svuint8_t svext(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))
+svuint32_t svext(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64)))
+svuint64_t svext(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16)))
+svuint16_t svext(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8)))
+svint8_t svext(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64)))
+svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32)))
+svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16)))
+svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32)))
+svint32_t svext(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64)))
+svint64_t svext(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16)))
+svint16_t svext(svint16_t, svint16_t, uint64_t);
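+// Editorial note (not part of the generated header): svext concatenates its
+// two operands and extracts a full-width vector starting at a constant lane
+// index, e.g. for a one-lane shift:
+//   svint32_t shifted = svext(a, b, 1); // a[1..], then b[0..] filling the top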
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m)))
+svint32_t svextb_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m)))
+svint64_t svextb_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m)))
+svint16_t svextb_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x)))
+svint32_t svextb_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x)))
+svint64_t svextb_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x)))
+svint16_t svextb_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z)))
+svint32_t svextb_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z)))
+svint64_t svextb_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z)))
+svint16_t svextb_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m)))
+svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m)))
+svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m)))
+svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x)))
+svuint32_t svextb_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x)))
+svuint64_t svextb_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x)))
+svuint16_t svextb_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z)))
+svuint32_t svextb_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z)))
+svuint64_t svextb_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z)))
+svuint16_t svextb_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m)))
+svint32_t svexth_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m)))
+svint64_t svexth_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x)))
+svint32_t svexth_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x)))
+svint64_t svexth_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z)))
+svint32_t svexth_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z)))
+svint64_t svexth_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m)))
+svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m)))
+svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x)))
+svuint32_t svexth_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x)))
+svuint64_t svexth_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z)))
+svuint32_t svexth_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z)))
+svuint64_t svexth_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m)))
+svint64_t svextw_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x)))
+svint64_t svextw_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z)))
+svint64_t svextw_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m)))
+svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x)))
+svuint64_t svextw_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z)))
+svuint64_t svextw_z(svbool_t, svuint64_t);
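+// [editorial note -- usage sketch, not part of the generated header]
+// svextb/svexth/svextw re-extend the low 8/16/32 bits of each lane in place:
+// sign-extension for signed element types, zero-extension for unsigned ones.
+// The _m/_x/_z suffixes pick the treatment of inactive lanes: merged from a
+// fallback vector, left unspecified, or zeroed, respectively.
+//
+//   svint32_t m = svextb_m(fallback, pg, v);  // inactive lanes from fallback
+//   svint32_t x = svextb_x(pg, v);            // inactive lanes: don't-care
+//   svint32_t z = svextb_z(pg, v);            // inactive lanes forced to zero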
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8)))
+svuint8_t svget2(svuint8x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32)))
+svuint32_t svget2(svuint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64)))
+svuint64_t svget2(svuint64x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16)))
+svuint16_t svget2(svuint16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8)))
+svint8_t svget2(svint8x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64)))
+svfloat64_t svget2(svfloat64x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32)))
+svfloat32_t svget2(svfloat32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16)))
+svfloat16_t svget2(svfloat16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32)))
+svint32_t svget2(svint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64)))
+svint64_t svget2(svint64x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16)))
+svint16_t svget2(svint16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8)))
+svuint8_t svget3(svuint8x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32)))
+svuint32_t svget3(svuint32x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64)))
+svuint64_t svget3(svuint64x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16)))
+svuint16_t svget3(svuint16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8)))
+svint8_t svget3(svint8x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64)))
+svfloat64_t svget3(svfloat64x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32)))
+svfloat32_t svget3(svfloat32x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16)))
+svfloat16_t svget3(svfloat16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32)))
+svint32_t svget3(svint32x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64)))
+svint64_t svget3(svint64x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16)))
+svint16_t svget3(svint16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8)))
+svuint8_t svget4(svuint8x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32)))
+svuint32_t svget4(svuint32x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64)))
+svuint64_t svget4(svuint64x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16)))
+svuint16_t svget4(svuint16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8)))
+svint8_t svget4(svint8x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64)))
+svfloat64_t svget4(svfloat64x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32)))
+svfloat32_t svget4(svfloat32x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16)))
+svfloat16_t svget4(svfloat16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32)))
+svint32_t svget4(svint32x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64)))
+svint64_t svget4(svint64x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16)))
+svint16_t svget4(svint16x4_t, uint64_t);
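+// [editorial note -- usage sketch, not part of the generated header]
+// svget2/svget3/svget4 extract one vector from a tuple type (svfooxN_t); the
+// index must be a compile-time constant below the tuple size. Tuples usually
+// come from svld2/svld3/svld4 or are assembled with svcreate2/3/4:
+//
+//   svfloat32x2_t pair = svcreate2(re, im);  // build a two-vector tuple
+//   svfloat32_t re2 = svget2(pair, 0);       // constant index: 0 or 1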
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8)))
+svuint8_t svinsr(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32)))
+svuint32_t svinsr(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64)))
+svuint64_t svinsr(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16)))
+svuint16_t svinsr(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8)))
+svint8_t svinsr(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64)))
+svfloat64_t svinsr(svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32)))
+svfloat32_t svinsr(svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16)))
+svfloat16_t svinsr(svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32)))
+svint32_t svinsr(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64)))
+svint64_t svinsr(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16)))
+svint16_t svinsr(svint16_t, int16_t);
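+// [editorial note -- usage sketch, not part of the generated header]
+// svinsr shifts every lane up one position and inserts the scalar at lane 0
+// (the INSR instruction); handy for carrying a value across loop iterations:
+//
+//   svint32_t shifted = svinsr(v, carry);  // lane 0 = carry, lane i = v[i-1]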
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8)))
+uint8_t svlasta(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32)))
+uint32_t svlasta(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64)))
+uint64_t svlasta(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16)))
+uint16_t svlasta(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8)))
+int8_t svlasta(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64)))
+float64_t svlasta(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32)))
+float32_t svlasta(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16)))
+float16_t svlasta(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32)))
+int32_t svlasta(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64)))
+int64_t svlasta(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16)))
+int16_t svlasta(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8)))
+uint8_t svlastb(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32)))
+uint32_t svlastb(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64)))
+uint64_t svlastb(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16)))
+uint16_t svlastb(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8)))
+int8_t svlastb(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64)))
+float64_t svlastb(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32)))
+float32_t svlastb(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16)))
+float16_t svlastb(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32)))
+int32_t svlastb(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64)))
+int64_t svlastb(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16)))
+int16_t svlastb(svbool_t, svint16_t);
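+// [editorial note -- usage sketch, not part of the generated header]
+// svlastb returns the last active lane as a scalar; svlasta returns the lane
+// after it. With no lanes active, svlastb yields the vector's final lane and
+// svlasta its first. Typical use: recovering the live value after a
+// svwhilelt-predicated loop.
+//
+//   float32_t tail = svlastb(pg, acc);  // last element the loop produced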
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8)))
+svuint8_t svld1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32)))
+svuint32_t svld1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64)))
+svuint64_t svld1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16)))
+svuint16_t svld1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8)))
+svint8_t svld1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64)))
+svfloat64_t svld1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32)))
+svfloat32_t svld1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16)))
+svfloat16_t svld1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32)))
+svint32_t svld1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))
+svint64_t svld1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))
+svint16_t svld1(svbool_t, int16_t const *);
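+// [editorial note -- usage sketch, not part of the generated header]
+// svld1 is the predicated contiguous load: inactive lanes read as zero and
+// perform no memory access, so a svwhilelt predicate makes the final partial
+// iteration safe with no scalar cleanup. Sketch, assuming <arm_sve.h> plus
+// svwhilelt_b32/svcntw/svmul_x/svst1 declared elsewhere in this header:
+//
+//   void scale(float32_t *dst, const float32_t *src, float32_t k, int64_t n) {
+//     for (int64_t i = 0; i < n; i += svcntw()) {  // svcntw(): f32 lanes/vector
+//       svbool_t pg = svwhilelt_b32(i, n);         // active while i + lane < n
+//       svst1(pg, dst + i, svmul_x(pg, svld1(pg, src + i), k));
+//     }
+//   }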
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
+svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
+svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
+svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
+svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
+svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
+svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
+svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
+svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
+svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
+svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
+svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
+svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
+svuint32_t svld1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
+svuint64_t svld1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
+svfloat64_t svld1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
+svfloat32_t svld1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
+svint32_t svld1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
+svint64_t svld1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
+svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
+svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
+svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
+svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
+svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
+svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
+svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
+svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
+svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
+svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
+svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
+svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
+svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
+svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
+svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
+svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
+svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
+svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))
+svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))
+svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))
+svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))
+svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))
+svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))
+svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t);
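+// [editorial note -- usage sketch, not part of the generated header]
+// The gathers come in two addressing families: a scalar base pointer plus a
+// vector of per-lane byte offsets or element-scaled indices, or a vector of
+// base addresses plus a scalar offset/index. Pointer-plus-index form:
+//
+//   svint64_t idx = svindex_s64(0, 2);                 // 0, 2, 4, ...
+//   svfloat64_t g = svld1_gather_index(pg, base, idx); // g[i] = base[idx[i]]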
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))
+svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))
+svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64)))
+svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16)))
+svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8)))
+svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64)))
+svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32)))
+svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16)))
+svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32)))
+svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64)))
+svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16)))
+svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t);
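+// [editorial note -- usage sketch, not part of the generated header]
+// The _vnum forms address in whole-vector units: vnum = 1 means one full
+// vector past base, whatever the hardware vector length is, which keeps
+// multi-vector unrolling VL-agnostic.
+//
+//   svfloat32_t v0 = svld1_vnum(pg, p, 0);  // same as svld1(pg, p)
+//   svfloat32_t v1 = svld1_vnum(pg, p, 1);  // next vector: p + svcntw() elements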
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8)))
+svuint8_t svld1rq(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32)))
+svuint32_t svld1rq(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64)))
+svuint64_t svld1rq(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16)))
+svuint16_t svld1rq(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8)))
+svint8_t svld1rq(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64)))
+svfloat64_t svld1rq(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32)))
+svfloat32_t svld1rq(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16)))
+svfloat16_t svld1rq(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32)))
+svint32_t svld1rq(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))
+svint64_t svld1rq(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))
+svint16_t svld1rq(svbool_t, int16_t const *);
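+// [editorial note -- usage sketch, not part of the generated header]
+// svld1rq loads one 128-bit quadword and replicates it across the whole
+// vector, e.g. broadcasting a fixed 4 x f32 coefficient pattern to every
+// 128-bit granule regardless of vector length:
+//
+//   svfloat32_t coeffs = svld1rq(svptrue_b32(), four_floats);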
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))
+svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))
+svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))
+svint32_t svld1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))
+svint64_t svld1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
+svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
+svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
+svint32_t svld1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
+svint64_t svld1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))
+svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))
+svint64_t svld1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))
+svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))
+svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))
+svint32_t svld1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))
+svint64_t svld1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))
+svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))
+svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))
+svint32_t svld1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))
+svint64_t svld1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))
+svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))
+svint64_t svld1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
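+// [editorial note -- usage sketch, not part of the generated header]
+// svld1sb/svld1sh/svld1sw gather narrow signed elements and sign-extend them
+// to the lane width named by the suffix; the u-prefixed forms zero-extend.
+// Unlike the plain svld1 gathers these are not overloaded on the result type,
+// so the destination width is spelled out in the name.
+//
+//   // 8-bit signed samples gathered by byte offset, widened to 32-bit lanes:
+//   svint32_t w = svld1sb_gather_offset_s32(pg, s8_base, offsets);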
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))
+svuint8x2_t svld2(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))
+svuint32x2_t svld2(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))
+svuint64x2_t svld2(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))
+svuint16x2_t svld2(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))
+svint8x2_t svld2(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))
+svfloat64x2_t svld2(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))
+svfloat32x2_t svld2(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))
+svfloat16x2_t svld2(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))
+svint32x2_t svld2(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))
+svint64x2_t svld2(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))
+svint16x2_t svld2(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))
+svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))
+svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))
+svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))
+svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))
+svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))
+svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))
+svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))
+svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))
+svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))
+svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))
+svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))
+svuint8x3_t svld3(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))
+svuint32x3_t svld3(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))
+svuint64x3_t svld3(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))
+svuint16x3_t svld3(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))
+svint8x3_t svld3(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))
+svfloat64x3_t svld3(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))
+svfloat32x3_t svld3(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))
+svfloat16x3_t svld3(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))
+svint32x3_t svld3(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))
+svint64x3_t svld3(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))
+svint16x3_t svld3(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))
+svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))
+svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))
+svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))
+svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))
+svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))
+svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))
+svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))
+svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))
+svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))
+svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))
+svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))
+svuint8x4_t svld4(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))
+svuint32x4_t svld4(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))
+svuint64x4_t svld4(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))
+svuint16x4_t svld4(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))
+svint8x4_t svld4(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))
+svfloat64x4_t svld4(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))
+svfloat32x4_t svld4(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))
+svfloat16x4_t svld4(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))
+svint32x4_t svld4(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))
+svint64x4_t svld4(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))
+svint16x4_t svld4(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))
+svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))
+svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))
+svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))
+svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))
+svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))
+svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))
+svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))
+svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))
+svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))
+svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))
+svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t);
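+// [editorial note -- usage sketch, not part of the generated header]
+// svld2/svld3/svld4 load interleaved structures and de-interleave them into a
+// tuple: svld2 over a buffer laid out re0,im0,re1,im1,... yields one vector of
+// all re and one of all im. Pair with svget2/3/4 to pull out the members.
+//
+//   svfloat32x2_t c = svld2(pg, buf);  // buf: re0, im0, re1, im1, ...
+//   svfloat32_t re = svget2(c, 0), im = svget2(c, 1);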
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))
+svuint8_t svldff1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))
+svuint32_t svldff1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))
+svuint64_t svldff1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))
+svuint16_t svldff1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))
+svint8_t svldff1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))
+svfloat64_t svldff1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))
+svfloat32_t svldff1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))
+svfloat16_t svldff1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))
+svint32_t svldff1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))
+svint64_t svldff1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))
+svint16_t svldff1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))
+svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))
+svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))
+svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))
+svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))
+svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))
+svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))
+svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))
+svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))
+svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))
+svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))
+svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))
+svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))
+svuint32_t svldff1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))
+svuint64_t svldff1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))
+svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))
+svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))
+svint32_t svldff1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))
+svint64_t svldff1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))
+svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))
+svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))
+svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))
+svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))
+svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))
+svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))
+svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))
+svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))
+svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))
+svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))
+svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))
+svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))
+svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))
+svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))
+svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))
+svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))
+svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))
+svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))
+svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))
+svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))
+svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))
+svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))
+svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))
+svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))
+svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))
+svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))
+svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))
+svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))
+svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))
+svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))
+svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))
+svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))
+svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))
+svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))
+svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t);
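+// Usage sketch (illustrative only; `buf` is assumed to point at readable
+// memory): the first-faulting loads above pair with the FFR, so a loop can
+// probe memory speculatively and then test which lanes actually loaded:
+//   svsetffr();                          // mark every FFR element as valid
+//   svbool_t pg = svptrue_b8();
+//   svuint8_t v = svldff1(pg, buf);      // lanes past a fault are not loaded
+//   svbool_t loaded = svrdffr_z(pg);     // predicate of lanes that did load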
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))
+svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))
+svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))
+svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))
+svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))
+svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))
+svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))
+svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))
+svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))
+svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))
+svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
+svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
+svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
+svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
+svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
+svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
+svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
+svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
+svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
+svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
+svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
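+// Naming sketch for the extending gathers above: the sb/sh/sw forms
+// sign-extend byte/halfword/word elements into the wider lanes, while
+// ub/uh/uw zero-extend them; `_offset` takes byte offsets and `_index`
+// takes element indices.  For example (illustrative; `base16` and `idx`
+// are assumed to be defined by the caller):
+//   // gather int16_t values at base16[idx[i]], sign-extended to 32 bits
+//   svint32_t r = svldff1sh_gather_index_s32(pg, base16, idx);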
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
+svuint8_t svldnf1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
+svuint32_t svldnf1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
+svuint64_t svldnf1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
+svuint16_t svldnf1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
+svint8_t svldnf1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
+svfloat64_t svldnf1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
+svfloat32_t svldnf1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
+svfloat16_t svldnf1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
+svint32_t svldnf1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))
+svint64_t svldnf1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))
+svint16_t svldnf1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))
+svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))
+svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))
+svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))
+svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))
+svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))
+svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))
+svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))
+svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))
+svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))
+svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))
+svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t);
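+// Usage sketch (illustrative): unlike ldff1, svldnf1 never takes a fault for
+// any element; elements that could not be loaded are flagged in the FFR
+// instead.  The `_vnum` forms displace the address by `vnum` whole vectors:
+//   svfloat32_t v0 = svldnf1(pg, p);            // vector at p (p assumed)
+//   svfloat32_t v1 = svldnf1_vnum(pg, p, 1);    // the next full vector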
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))
+svuint8_t svldnt1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))
+svuint32_t svldnt1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))
+svuint64_t svldnt1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))
+svuint16_t svldnt1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))
+svint8_t svldnt1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))
+svfloat64_t svldnt1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))
+svfloat32_t svldnt1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))
+svfloat16_t svldnt1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))
+svint32_t svldnt1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))
+svint64_t svldnt1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))
+svint16_t svldnt1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))
+svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))
+svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))
+svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))
+svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))
+svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))
+svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))
+svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))
+svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))
+svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))
+svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))
+svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t);
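+// Usage sketch (illustrative): svldnt1 is a non-temporal load, a hint that
+// the data will not be reused soon, letting streaming loops avoid polluting
+// the cache:
+//   svfloat64_t v = svldnt1(pg, &a[i]);   // `a` and `i` assumed defined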
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))
+uint64_t svlen(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))
+uint64_t svlen(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))
+uint64_t svlen(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))
+uint64_t svlen(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))
+uint64_t svlen(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))
+uint64_t svlen(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))
+uint64_t svlen(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))
+uint64_t svlen(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))
+uint64_t svlen(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))
+uint64_t svlen(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))
+uint64_t svlen(svint16_t);
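+// Usage sketch (illustrative): svlen reports how many elements a vector of
+// the argument's type holds on the current hardware, which is the natural
+// step for vector-length-agnostic loops:
+//   for (uint64_t i = 0; i < n; i += svlen(svdup_f32(0.0f))) { /* ... */ }
+//   // equivalently svcntw() for 32-bit elements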
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))
+svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))
+svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))
+svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))
+svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))
+svint8_t svlsl_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))
+svint32_t svlsl_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))
+svint64_t svlsl_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))
+svint16_t svlsl_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))
+svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))
+svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))
+svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))
+svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))
+svint8_t svlsl_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))
+svint32_t svlsl_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))
+svint64_t svlsl_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))
+svint16_t svlsl_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))
+svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))
+svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))
+svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))
+svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))
+svint8_t svlsl_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))
+svint32_t svlsl_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))
+svint64_t svlsl_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))
+svint16_t svlsl_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))
+svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))
+svuint32_t svlsl_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))
+svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))
+svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))
+svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))
+svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))
+svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))
+svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))
+svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))
+svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))
+svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))
+svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))
+svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))
+svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))
+svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))
+svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))
+svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))
+svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))
+svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))
+svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))
+svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))
+svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))
+svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))
+svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))
+svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))
+svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))
+svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))
+svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))
+svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))
+svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))
+svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))
+svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))
+svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))
+svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))
+svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))
+svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))
+svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))
+svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))
+svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))
+svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))
+svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))
+svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))
+svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))
+svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))
+svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))
+svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))
+svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))
+svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))
+svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))
+svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))
+svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))
+svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))
+svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))
+svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))
+svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))
+svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))
+svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))
+svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))
+svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))
+svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t);
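+// Usage sketch (illustrative; `v`, `amounts`, `bytes` assumed defined): the
+// _m/_x/_z suffixes choose what inactive lanes hold (first operand's value /
+// unspecified / zero), and the _wide forms take 64-bit shift amounts even
+// for narrower element types:
+//   svuint32_t a = svlsl_x(pg, v, 4u);        // v << 4, inactive lanes undefined
+//   svuint32_t b = svlsl_z(pg, v, amounts);   // per-lane shifts, inactive -> 0
+//   svuint8_t  c = svlsl_wide_m(pg, bytes, (uint64_t)3);  // 64-bit amount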
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))
+svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))
+svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))
+svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))
+svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))
+svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))
+svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))
+svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))
+svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))
+svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))
+svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))
+svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))
+svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))
+svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))
+svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))
+svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))
+svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))
+svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))
+svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))
+svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))
+svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))
+svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))
+svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))
+svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))
+svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))
+svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))
+svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))
+svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))
+svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))
+svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))
+svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))
+svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))
+svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))
+svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))
+svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))
+svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))
+svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))
+svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))
+svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))
+svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))
+svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))
+svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))
+svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t);
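+// Note (illustrative): svlsr, the logical shift right, is defined only for
+// unsigned element types; the arithmetic right shift for signed types is the
+// separate svasr family.  E.g.:
+//   svuint16_t h = svlsr_x(pg, v, 1u);   // v >> 1 on each active lane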
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))
+svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))
+svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))
+svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))
+svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))
+svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))
+svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))
+svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))
+svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))
+svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))
+svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))
+svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))
+svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))
+svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))
+svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))
+svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))
+svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))
+svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))
+svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))
+svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))
+svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))
+svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))
+svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))
+svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))
+svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))
+svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))
+svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))
+svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))
+svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))
+svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))
+svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))
+svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))
+svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))
+svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))
+svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))
+svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))
+svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))
+svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))
+svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))
+svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))
+svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))
+svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))
+svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))
+svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))
+svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))
+svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))
+svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))
+svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))
+svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))
+svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))
+svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))
+svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))
+svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))
+svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))
+svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))
+svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))
+svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))
+svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))
+svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))
+svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))
+svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))
+svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))
+svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))
+svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))
+svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t);
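+// Usage sketch (illustrative; `a`, `b`, `c` assumed defined): svmad computes
+// op1*op2 + op3 per active lane; for floating-point types this is a fused
+// multiply-add:
+//   svfloat32_t r = svmad_x(pg, a, b, c);   // r = a*b + c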
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))
+svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))
+svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))
+svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))
+svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))
+svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))
+svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))
+svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))
+svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))
+svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))
+svint8_t svmax_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))
+svint32_t svmax_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))
+svint64_t svmax_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))
+svint16_t svmax_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))
+svint8_t svmax_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))
+svint32_t svmax_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))
+svint64_t svmax_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))
+svint16_t svmax_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))
+svint8_t svmax_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))
+svint32_t svmax_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))
+svint64_t svmax_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))
+svint16_t svmax_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))
+svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))
+svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))
+svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))
+svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))
+svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))
+svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))
+svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))
+svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))
+svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))
+svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))
+svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))
+svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))
+svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))
+svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))
+svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))
+svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))
+svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))
+svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))
+svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))
+svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))
+svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))
+svint8_t svmax_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))
+svint32_t svmax_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))
+svint64_t svmax_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))
+svint16_t svmax_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))
+svint8_t svmax_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))
+svint32_t svmax_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))
+svint64_t svmax_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))
+svint16_t svmax_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))
+svint8_t svmax_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))
+svint32_t svmax_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))
+svint64_t svmax_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))
+svint16_t svmax_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))
+svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))
+svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))
+svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))
+svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))
+svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))
+svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))
+svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))
+svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))
+svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))
+svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))
+svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))
+svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))
+svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))
+svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))
+svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))
+svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))
+svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))
+svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))
+svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))
+svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))
+svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))
+svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))
+svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))
+svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))
+svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))
+svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))
+svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))
+svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))
+svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))
+svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))
+float64_t svmaxnmv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))
+float32_t svmaxnmv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))
+float16_t svmaxnmv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))
+float64_t svmaxv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))
+float32_t svmaxv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))
+float16_t svmaxv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))
+int8_t svmaxv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))
+int32_t svmaxv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))
+int64_t svmaxv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))
+int16_t svmaxv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))
+uint8_t svmaxv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))
+uint32_t svmaxv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))
+uint64_t svmaxv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))
+uint16_t svmaxv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))
+svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))
+svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))
+svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))
+svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))
+svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))
+svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))
+svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))
+svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))
+svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))
+svint8_t svmin_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))
+svint32_t svmin_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))
+svint64_t svmin_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))
+svint16_t svmin_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))
+svint8_t svmin_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))
+svint32_t svmin_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))
+svint64_t svmin_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))
+svint16_t svmin_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))
+svint8_t svmin_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))
+svint32_t svmin_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))
+svint64_t svmin_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))
+svint16_t svmin_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))
+svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))
+svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))
+svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))
+svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))
+svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))
+svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))
+svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))
+svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))
+svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))
+svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))
+svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))
+svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))
+svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))
+svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))
+svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))
+svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))
+svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))
+svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))
+svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))
+svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))
+svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))
+svint8_t svmin_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))
+svint32_t svmin_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))
+svint64_t svmin_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))
+svint16_t svmin_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))
+svint8_t svmin_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))
+svint32_t svmin_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))
+svint64_t svmin_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))
+svint16_t svmin_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))
+svint8_t svmin_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))
+svint32_t svmin_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))
+svint64_t svmin_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))
+svint16_t svmin_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))
+svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))
+svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))
+svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))
+svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))
+svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))
+svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))
+svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))
+svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))
+svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))
+svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))
+svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))
+svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))
+svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))
+svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))
+svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))
+svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))
+svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))
+svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))
+svfloat64_t svminnm_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))
+svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))
+svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))
+svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))
+svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))
+svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))
+svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))
+svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))
+svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))
+svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))
+svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))
+svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))
+float64_t svminnmv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))
+float32_t svminnmv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))
+float16_t svminnmv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))
+float64_t svminv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))
+float32_t svminv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))
+float16_t svminv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))
+int8_t svminv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))
+int32_t svminv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))
+int64_t svminv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))
+int16_t svminv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))
+uint8_t svminv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))
+uint32_t svminv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))
+uint64_t svminv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))
+uint16_t svminv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))
+svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))
+svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))
+svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))
+svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))
+svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))
+svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))
+svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))
+svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))
+svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))
+svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))
+svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))
+svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))
+svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))
+svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))
+svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))
+svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))
+svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))
+svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))
+svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))
+svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))
+svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))
+svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))
+svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))
+svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))
+svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))
+svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))
+svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))
+svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))
+svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))
+svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))
+svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))
+svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))
+svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))
+svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))
+svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))
+svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))
+svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))
+svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))
+svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))
+svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))
+svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))
+svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))
+svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))
+svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))
+svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))
+svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))
+svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))
+svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))
+svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))
+svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))
+svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))
+svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))
+svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))
+svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))
+svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))
+svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))
+svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))
+svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))
+svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))
+svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))
+svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))
+svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))
+svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))
+svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))
+svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))
+svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))
+svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))
+svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))
+svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))
+svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))
+svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))
+svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))
+svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))
+svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))
+svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))
+svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))
+svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))
+svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))
+svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))
+svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))
+svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))
+svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))
+svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))
+svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))
+svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))
+svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))
+svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))
+svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))
+svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))
+svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))
+svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))
+svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))
+svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))
+svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))
+svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))
+svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))
+svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))
+svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))
+svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))
+svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))
+svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))
+svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))
+svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))
+svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))
+svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))
+svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))
+svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))
+svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))
+svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))
+svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))
+svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))
+svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))
+svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))
+svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))
+svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))
+svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))
+svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))
+svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))
+svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))
+svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))
+svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))
+svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))
+svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))
+svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))
+svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))
+svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))
+svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))
+svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))
+svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))
+svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))
+svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))
+svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))
+svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))
+svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))
+svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))
+svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))
+svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))
+svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))
+svbool_t svmov_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))
+svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))
+svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))
+svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))
+svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))
+svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))
+svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))
+svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))
+svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))
+svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))
+svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))
+svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))
+svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))
+svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))
+svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))
+svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))
+svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))
+svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))
+svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))
+svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))
+svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))
+svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))
+svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))
+svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))
+svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))
+svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))
+svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))
+svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))
+svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))
+svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))
+svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))
+svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))
+svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))
+svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))
+svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))
+svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))
+svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))
+svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))
+svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))
+svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))
+svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))
+svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))
+svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))
+svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))
+svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))
+svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))
+svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))
+svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))
+svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))
+svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))
+svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))
+svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))
+svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))
+svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))
+svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))
+svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))
+svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))
+svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))
+svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))
+svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))
+svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))
+svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))
+svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))
+svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))
+svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))
+svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))
+svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))
+svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))
+svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))
+svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))
+svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))
+svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))
+svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))
+svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))
+svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))
+svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))
+svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))
+svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))
+svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))
+svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))
+svint8_t svmul_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))
+svint32_t svmul_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))
+svint64_t svmul_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))
+svint16_t svmul_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))
+svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))
+svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))
+svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))
+svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))
+svint8_t svmul_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))
+svint32_t svmul_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))
+svint64_t svmul_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))
+svint16_t svmul_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))
+svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))
+svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))
+svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))
+svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))
+svint8_t svmul_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))
+svint32_t svmul_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))
+svint64_t svmul_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))
+svint16_t svmul_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))
+svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))
+svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))
+svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))
+svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))
+svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))
+svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))
+svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))
+svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))
+svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))
+svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))
+svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))
+svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))
+svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))
+svint8_t svmul_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))
+svint32_t svmul_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))
+svint64_t svmul_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))
+svint16_t svmul_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))
+svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))
+svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))
+svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))
+svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))
+svint8_t svmul_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))
+svint32_t svmul_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))
+svint64_t svmul_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))
+svint16_t svmul_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))
+svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))
+svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))
+svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))
+svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))
+svint8_t svmul_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))
+svint32_t svmul_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))
+svint64_t svmul_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))
+svint16_t svmul_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))
+svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))
+svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))
+svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t);
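+// Illustrative sketch, not part of the generated header: the _m, _x and
+// _z suffixes select how inactive lanes of the result are produced.
+// _m merges them from the first vector operand, _z zeroes them, and _x
+// leaves them unspecified, letting the compiler pick the cheapest form.
+// A minimal example, assuming an SVE-enabled target:
+//
+//   svfloat32_t scale_x(svbool_t pg, svfloat32_t v) {
+//     return svmul_x(pg, v, 2.0f);   // inactive lanes: unspecified
+//   }
+//   svfloat32_t scale_z(svbool_t pg, svfloat32_t v) {
+//     return svmul_z(pg, v, 2.0f);   // inactive lanes: zeroed
+//   }
+//
+// svmul_lane is unpredicated; its trailing uint64_t is an immediate
+// selecting the element (within each 128-bit segment) of the second
+// operand that multiplies every lane of the first.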
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))
+svint8_t svmulh_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))
+svint32_t svmulh_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))
+svint64_t svmulh_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))
+svint16_t svmulh_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))
+svint8_t svmulh_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))
+svint32_t svmulh_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))
+svint64_t svmulh_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))
+svint16_t svmulh_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))
+svint8_t svmulh_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))
+svint32_t svmulh_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))
+svint64_t svmulh_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))
+svint16_t svmulh_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))
+svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))
+svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))
+svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))
+svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))
+svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))
+svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))
+svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))
+svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))
+svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))
+svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))
+svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))
+svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))
+svint8_t svmulh_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))
+svint32_t svmulh_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))
+svint64_t svmulh_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))
+svint16_t svmulh_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))
+svint8_t svmulh_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))
+svint32_t svmulh_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))
+svint64_t svmulh_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))
+svint16_t svmulh_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))
+svint8_t svmulh_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))
+svint32_t svmulh_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))
+svint64_t svmulh_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))
+svint16_t svmulh_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))
+svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))
+svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))
+svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))
+svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))
+svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))
+svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))
+svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))
+svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))
+svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))
+svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))
+svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))
+svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t);
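+// Illustrative note (editorial sketch, not from the header): svmulh
+// returns the high half of the widened element-by-element product,
+// e.g. bits [63:32] of each 64-bit product for 32-bit lanes:
+//
+//   svint32_t mulhi(svbool_t pg, svint32_t a, svint32_t b) {
+//     return svmulh_x(pg, a, b);   // high half; low half via svmul_x
+//   }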
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))
+svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))
+svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))
+svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))
+svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))
+svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))
+svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))
+svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))
+svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))
+svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))
+svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))
+svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))
+svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))
+svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))
+svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))
+svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))
+svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))
+svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))
+svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t);
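+// Illustrative note (assumption, not from the header): svmulx maps to
+// the FMULX instruction, which behaves like an ordinary multiply
+// except that (+/-0 x +/-infinity) yields +/-2.0 instead of NaN,
+// matching the reciprocal-step idiom it is designed for.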
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))
+svbool_t svnand_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))
+svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))
+svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))
+svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))
+svfloat64_t svneg_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))
+svfloat32_t svneg_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))
+svfloat16_t svneg_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))
+svfloat64_t svneg_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))
+svfloat32_t svneg_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))
+svfloat16_t svneg_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))
+svint8_t svneg_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))
+svint32_t svneg_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))
+svint64_t svneg_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))
+svint16_t svneg_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))
+svint8_t svneg_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))
+svint32_t svneg_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))
+svint64_t svneg_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))
+svint16_t svneg_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))
+svint8_t svneg_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))
+svint32_t svneg_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))
+svint64_t svneg_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))
+svint16_t svneg_z(svbool_t, svint16_t);
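+// Illustrative sketch: unlike the binary ops above, the merging (_m)
+// unary forms take an explicit "inactive" vector as their first
+// argument, and inactive lanes of the result are copied from it:
+//
+//   svint32_t neg_m(svbool_t pg, svint32_t fallback, svint32_t v) {
+//     return svneg_m(fallback, pg, v);  // lanes with pg false keep fallback
+//   }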
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))
+svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))
+svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))
+svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))
+svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))
+svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))
+svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))
+svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))
+svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))
+svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))
+svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))
+svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))
+svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))
+svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))
+svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))
+svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))
+svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))
+svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))
+svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))
+svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))
+svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))
+svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))
+svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))
+svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))
+svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))
+svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))
+svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))
+svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))
+svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))
+svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))
+svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))
+svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))
+svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))
+svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))
+svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))
+svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))
+svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))
+svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))
+svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))
+svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))
+svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))
+svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))
+svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z)))
+svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z)))
+svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z)))
+svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m)))
+svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m)))
+svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m)))
+svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x)))
+svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x)))
+svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x)))
+svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z)))
+svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z)))
+svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z)))
+svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m)))
+svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m)))
+svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m)))
+svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x)))
+svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x)))
+svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x)))
+svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z)))
+svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z)))
+svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z)))
+svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m)))
+svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m)))
+svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m)))
+svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x)))
+svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x)))
+svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x)))
+svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z)))
+svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z)))
+svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z)))
+svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
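+// Illustrative note on the fused multiply naming (editorial sketch,
+// not from the header): svmad/svmsb take the addend last
+// (op3 +/- op1*op2) while svmla/svmls take it first (op1 +/- op2*op3);
+// the svnm* forms compute the negation of the corresponding svm*
+// result, e.g. svnmla(pg, a, b, c) = -(a + b*c). Each lowers to a
+// single fused FNMAD/FNMLA/FNMLS/FNMSB instruction, so there is no
+// intermediate rounding.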
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z)))
+svbool_t svnor_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z)))
+svbool_t svnot_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m)))
+svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m)))
+svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m)))
+svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m)))
+svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m)))
+svint8_t svnot_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m)))
+svint32_t svnot_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m)))
+svint64_t svnot_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m)))
+svint16_t svnot_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x)))
+svuint8_t svnot_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x)))
+svuint32_t svnot_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x)))
+svuint64_t svnot_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x)))
+svuint16_t svnot_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x)))
+svint8_t svnot_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x)))
+svint32_t svnot_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x)))
+svint64_t svnot_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x)))
+svint16_t svnot_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z)))
+svuint8_t svnot_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z)))
+svuint32_t svnot_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z)))
+svuint64_t svnot_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z)))
+svuint16_t svnot_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z)))
+svint8_t svnot_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z)))
+svint32_t svnot_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z)))
+svint64_t svnot_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z)))
+svint16_t svnot_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z)))
+svbool_t svorn_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z)))
+svbool_t svorr_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m)))
+svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m)))
+svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m)))
+svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m)))
+svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m)))
+svint8_t svorr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m)))
+svint32_t svorr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m)))
+svint64_t svorr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m)))
+svint16_t svorr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x)))
+svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x)))
+svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x)))
+svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x)))
+svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x)))
+svint8_t svorr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x)))
+svint32_t svorr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x)))
+svint64_t svorr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x)))
+svint16_t svorr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z)))
+svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z)))
+svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z)))
+svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z)))
+svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z)))
+svint8_t svorr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z)))
+svint32_t svorr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z)))
+svint64_t svorr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z)))
+svint16_t svorr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m)))
+svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m)))
+svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m)))
+svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m)))
+svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m)))
+svint8_t svorr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m)))
+svint32_t svorr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m)))
+svint64_t svorr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m)))
+svint16_t svorr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x)))
+svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x)))
+svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x)))
+svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x)))
+svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x)))
+svint8_t svorr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x)))
+svint32_t svorr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x)))
+svint64_t svorr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x)))
+svint16_t svorr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z)))
+svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z)))
+svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z)))
+svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z)))
+svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z)))
+svint8_t svorr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z)))
+svint32_t svorr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z)))
+svint64_t svorr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))
+svint16_t svorr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))
+uint8_t svorv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))
+uint32_t svorv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))
+uint64_t svorv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))
+uint16_t svorv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))
+int8_t svorv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))
+int32_t svorv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))
+int64_t svorv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
+int16_t svorv(svbool_t, svint16_t);
+#define svpfalse(...) __builtin_sve_svpfalse_b(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
+svbool_t svpfirst(svbool_t, svbool_t);
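+// Note (editorial assumption): svpfalse is a macro rather than an
+// __aio alias, presumably because the overload-by-argument mechanism
+// cannot dispatch a zero-argument call. svpfirst(pg, prev) is the
+// usual way to start a lane-by-lane walk of a predicate:
+//
+//   svbool_t first = svpfirst(pg, svpfalse());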
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
+void svprfb_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))
+void svprfb_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))
+void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))
+void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))
+void svprfb_gather_offset(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))
+void svprfb_gather_offset(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))
+void svprfb_gather_offset(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))
+void svprfb_gather_offset(svbool_t, void const *, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))
+void svprfd_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))
+void svprfd_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))
+void svprfd_gather_index(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))
+void svprfd_gather_index(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))
+void svprfd_gather_index(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))
+void svprfd_gather_index(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))
+void svprfd_gather_index(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))
+void svprfd_gather_index(svbool_t, void const *, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))
+void svprfh_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))
+void svprfh_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))
+void svprfh_gather_index(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))
+void svprfh_gather_index(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))
+void svprfh_gather_index(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))
+void svprfh_gather_index(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))
+void svprfh_gather_index(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))
+void svprfh_gather_index(svbool_t, void const *, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
+void svprfw_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
+void svprfw_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
+void svprfw_gather_index(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
+void svprfw_gather_index(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
+void svprfw_gather_index(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
+void svprfw_gather_index(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
+void svprfw_gather_index(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
+void svprfw_gather_index(svbool_t, void const *, svuint64_t, sv_prfop);
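+// Illustrative sketch (assumes an SVE target): the gather prefetches
+// take an sv_prfop selecting the prefetch kind; SV_PLDL1KEEP is the
+// ACLE enumerator for "prefetch for load into L1, temporal":
+//
+//   void prefetch_rows(svbool_t pg, const double *base, svint64_t idx) {
+//     svprfd_gather_index(pg, base, idx, SV_PLDL1KEEP);
+//   }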
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
+svint8_t svqadd(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
+svint32_t svqadd(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))
+svint64_t svqadd(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))
+svint16_t svqadd(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))
+svuint8_t svqadd(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))
+svuint32_t svqadd(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64)))
+svuint64_t svqadd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16)))
+svuint16_t svqadd(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8)))
+svint8_t svqadd(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32)))
+svint32_t svqadd(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64)))
+svint64_t svqadd(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16)))
+svint16_t svqadd(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8)))
+svuint8_t svqadd(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32)))
+svuint32_t svqadd(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64)))
+svuint64_t svqadd(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16)))
+svuint16_t svqadd(svuint16_t, svuint16_t);
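+// Illustrative sketch: svqadd is an unpredicated saturating add;
+// results clamp to the element type's limits instead of wrapping:
+//
+//   svuint8_t sat_add(svuint8_t a, svuint8_t b) {
+//     return svqadd(a, b);   // per lane, 200 + 100 -> 255, not 44
+//   }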
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32)))
+int32_t svqdecb(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64)))
+int64_t svqdecb(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32)))
+uint32_t svqdecb(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64)))
+uint64_t svqdecb(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32)))
+int32_t svqdecb_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64)))
+int64_t svqdecb_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32)))
+uint32_t svqdecb_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64)))
+uint64_t svqdecb_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32)))
+int32_t svqdecd(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64)))
+int64_t svqdecd(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32)))
+uint32_t svqdecd(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64)))
+uint64_t svqdecd(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64)))
+svint64_t svqdecd(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64)))
+svuint64_t svqdecd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32)))
+int32_t svqdecd_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64)))
+int64_t svqdecd_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32)))
+uint32_t svqdecd_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64)))
+uint64_t svqdecd_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))
+svint64_t svqdecd_pat(svint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))
+svuint64_t svqdecd_pat(svuint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))
+int32_t svqdech(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))
+int64_t svqdech(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))
+uint32_t svqdech(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))
+uint64_t svqdech(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))
+svint16_t svqdech(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))
+svuint16_t svqdech(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))
+int32_t svqdech_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))
+int64_t svqdech_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))
+uint32_t svqdech_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))
+uint64_t svqdech_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))
+svint16_t svqdech_pat(svint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))
+svuint16_t svqdech_pat(svuint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))
+int32_t svqdecp_b8(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))
+int32_t svqdecp_b32(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))
+int32_t svqdecp_b64(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))
+int32_t svqdecp_b16(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))
+int64_t svqdecp_b8(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))
+int64_t svqdecp_b32(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))
+int64_t svqdecp_b64(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))
+int64_t svqdecp_b16(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))
+uint32_t svqdecp_b8(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))
+uint32_t svqdecp_b32(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))
+uint32_t svqdecp_b64(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))
+uint32_t svqdecp_b16(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))
+uint64_t svqdecp_b8(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))
+uint64_t svqdecp_b32(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))
+uint64_t svqdecp_b64(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))
+uint64_t svqdecp_b16(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))
+svint32_t svqdecp(svint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))
+svint64_t svqdecp(svint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))
+svint16_t svqdecp(svint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))
+svuint32_t svqdecp(svuint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))
+svuint64_t svqdecp(svuint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))
+svuint16_t svqdecp(svuint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))
+int32_t svqdecw(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))
+int64_t svqdecw(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))
+uint32_t svqdecw(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))
+uint64_t svqdecw(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))
+svint32_t svqdecw(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))
+svuint32_t svqdecw(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))
+int32_t svqdecw_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))
+int64_t svqdecw_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))
+uint32_t svqdecw_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))
+uint64_t svqdecw_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))
+svint32_t svqdecw_pat(svint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))
+svuint32_t svqdecw_pat(svuint32_t, sv_pattern, uint64_t);
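+// Illustrative sketch: the svqdec* family above and the svqinc* family
+// below saturate a scalar or vector counter while stepping it by a
+// multiple of the element count (b/h/w/d = 8/16/32/64-bit elements per
+// vector). A typical strip-mining decrement, as an editorial example:
+//
+//   int64_t remaining = n;
+//   while (remaining > 0) {
+//     /* ...process one vector's worth of 32-bit lanes... */
+//     remaining = svqdecw(remaining, 1);  // remaining -= 1 * lanes, saturating
+//   }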
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))
+int32_t svqincb(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))
+int64_t svqincb(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))
+uint32_t svqincb(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))
+uint64_t svqincb(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))
+int32_t svqincb_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))
+int64_t svqincb_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))
+uint32_t svqincb_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))
+uint64_t svqincb_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))
+int32_t svqincd(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))
+int64_t svqincd(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))
+uint32_t svqincd(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))
+uint64_t svqincd(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))
+svint64_t svqincd(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))
+svuint64_t svqincd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))
+int32_t svqincd_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))
+int64_t svqincd_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))
+uint32_t svqincd_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))
+uint64_t svqincd_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))
+svint64_t svqincd_pat(svint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))
+svuint64_t svqincd_pat(svuint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))
+int32_t svqinch(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))
+int64_t svqinch(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))
+uint32_t svqinch(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))
+uint64_t svqinch(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))
+svint16_t svqinch(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))
+svuint16_t svqinch(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))
+int32_t svqinch_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))
+int64_t svqinch_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))
+uint32_t svqinch_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))
+uint64_t svqinch_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))
+svint16_t svqinch_pat(svint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))
+svuint16_t svqinch_pat(svuint16_t, sv_pattern, uint64_t);
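+// svqincp: saturating increment by the number of active (true) elements in
+// a predicate; the _b8/_b16/_b32/_b64 suffixes on the scalar forms name the
+// predicate's element width.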
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))
+int32_t svqincp_b8(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))
+int32_t svqincp_b32(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))
+int32_t svqincp_b64(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))
+int32_t svqincp_b16(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))
+int64_t svqincp_b8(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))
+int64_t svqincp_b32(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))
+int64_t svqincp_b64(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))
+int64_t svqincp_b16(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))
+uint32_t svqincp_b8(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))
+uint32_t svqincp_b32(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))
+uint32_t svqincp_b64(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))
+uint32_t svqincp_b16(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))
+uint64_t svqincp_b8(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))
+uint64_t svqincp_b32(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))
+uint64_t svqincp_b64(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))
+uint64_t svqincp_b16(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))
+svint32_t svqincp(svint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))
+svint64_t svqincp(svint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))
+svint16_t svqincp(svint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))
+svuint32_t svqincp(svuint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))
+svuint64_t svqincp(svuint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))
+svuint16_t svqincp(svuint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))
+int32_t svqincw(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))
+int64_t svqincw(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))
+uint32_t svqincw(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))
+uint64_t svqincw(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))
+svint32_t svqincw(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))
+svuint32_t svqincw(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))
+int32_t svqincw_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))
+int64_t svqincw_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))
+uint32_t svqincw_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))
+uint64_t svqincw_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))
+svint32_t svqincw_pat(svint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))
+svuint32_t svqincw_pat(svuint32_t, sv_pattern, uint64_t);
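+// svqsub: saturating subtraction for every signed and unsigned integer
+// element type, element-wise or vector-minus-scalar in the _n forms.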
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))
+svint8_t svqsub(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))
+svint32_t svqsub(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))
+svint64_t svqsub(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))
+svint16_t svqsub(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))
+svuint8_t svqsub(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))
+svuint32_t svqsub(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))
+svuint64_t svqsub(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))
+svuint16_t svqsub(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))
+svint8_t svqsub(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))
+svint32_t svqsub(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))
+svint64_t svqsub(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))
+svint16_t svqsub(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))
+svuint8_t svqsub(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))
+svuint32_t svqsub(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))
+svuint64_t svqsub(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))
+svuint16_t svqsub(svuint16_t, svuint16_t);
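+// svrbit: reverse the bit order within each element. As throughout this
+// header, _m merges inactive elements from the first operand, _z zeroes
+// them, and _x leaves them unspecified.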
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))
+svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))
+svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))
+svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))
+svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))
+svint8_t svrbit_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))
+svint32_t svrbit_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))
+svint64_t svrbit_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))
+svint16_t svrbit_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))
+svuint8_t svrbit_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))
+svuint32_t svrbit_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))
+svuint64_t svrbit_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))
+svuint16_t svrbit_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))
+svint8_t svrbit_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))
+svint32_t svrbit_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))
+svint64_t svrbit_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))
+svint16_t svrbit_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))
+svuint8_t svrbit_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))
+svuint32_t svrbit_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))
+svuint64_t svrbit_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))
+svuint16_t svrbit_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))
+svint8_t svrbit_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))
+svint32_t svrbit_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))
+svint64_t svrbit_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
+svint16_t svrbit_z(svbool_t, svint16_t);
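+// svrecpe/svrecps: reciprocal estimate and refinement step (FRECPE/FRECPS);
+// svrecps(a, x) returns 2.0 - a*x, so multiplying refines x toward 1/a.
+// Illustrative Newton-Raphson step (not part of the generated header;
+// svmul_x is declared elsewhere in this file):
+//   svfloat32_t x = svrecpe(a);          // rough ~1/a estimate
+//   x = svmul_x(pg, x, svrecps(a, x));   // one refinement step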
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
+svfloat64_t svrecpe(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))
+svfloat32_t svrecpe(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))
+svfloat16_t svrecpe(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))
+svfloat64_t svrecps(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))
+svfloat32_t svrecps(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))
+svfloat16_t svrecps(svfloat16_t, svfloat16_t);
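+// svrecpx: floating-point reciprocal exponent (FRECPX).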
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))
+svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))
+svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))
+svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))
+svfloat64_t svrecpx_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))
+svfloat32_t svrecpx_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))
+svfloat16_t svrecpx_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))
+svfloat64_t svrecpx_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))
+svfloat32_t svrecpx_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))
+svfloat16_t svrecpx_z(svbool_t, svfloat16_t);
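+// svrev: reverse the order of elements across the whole vector (REV).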
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))
+svuint8_t svrev(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))
+svuint32_t svrev(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))
+svuint64_t svrev(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))
+svuint16_t svrev(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))
+svint8_t svrev(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))
+svfloat64_t svrev(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))
+svfloat32_t svrev(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))
+svfloat16_t svrev(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))
+svint32_t svrev(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))
+svint64_t svrev(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))
+svint16_t svrev(svint16_t);
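+// svrevb/svrevh/svrevw: reverse the bytes, halfwords, or words within each
+// element (REVB/REVH/REVW), e.g. for endianness conversion.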
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))
+svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))
+svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))
+svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))
+svint32_t svrevb_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))
+svint64_t svrevb_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))
+svint16_t svrevb_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))
+svuint32_t svrevb_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))
+svuint64_t svrevb_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))
+svuint16_t svrevb_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))
+svint32_t svrevb_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))
+svint64_t svrevb_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))
+svint16_t svrevb_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))
+svuint32_t svrevb_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))
+svuint64_t svrevb_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))
+svuint16_t svrevb_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))
+svint32_t svrevb_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))
+svint64_t svrevb_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))
+svint16_t svrevb_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))
+svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))
+svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))
+svint32_t svrevh_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))
+svint64_t svrevh_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))
+svuint32_t svrevh_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))
+svuint64_t svrevh_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))
+svint32_t svrevh_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))
+svint64_t svrevh_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))
+svuint32_t svrevh_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))
+svuint64_t svrevh_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))
+svint32_t svrevh_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))
+svint64_t svrevh_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))
+svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))
+svint64_t svrevw_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))
+svuint64_t svrevw_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))
+svint64_t svrevw_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))
+svuint64_t svrevw_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))
+svint64_t svrevw_z(svbool_t, svint64_t);
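+// Round-to-integral family (FRINT*): svrinta rounds to nearest with ties
+// away from zero, svrinti uses the current rounding mode, svrintm rounds
+// toward minus infinity, svrintn to nearest with ties to even, svrintp
+// toward plus infinity, svrintx uses the current mode but signals inexact,
+// and svrintz rounds toward zero.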
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))
+svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))
+svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))
+svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))
+svfloat64_t svrinta_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))
+svfloat32_t svrinta_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))
+svfloat16_t svrinta_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))
+svfloat64_t svrinta_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))
+svfloat32_t svrinta_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))
+svfloat16_t svrinta_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))
+svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))
+svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))
+svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))
+svfloat64_t svrinti_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))
+svfloat32_t svrinti_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))
+svfloat16_t svrinti_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))
+svfloat64_t svrinti_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))
+svfloat32_t svrinti_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))
+svfloat16_t svrinti_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))
+svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))
+svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))
+svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))
+svfloat64_t svrintm_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))
+svfloat32_t svrintm_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))
+svfloat16_t svrintm_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))
+svfloat64_t svrintm_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))
+svfloat32_t svrintm_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))
+svfloat16_t svrintm_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))
+svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))
+svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))
+svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))
+svfloat64_t svrintn_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))
+svfloat32_t svrintn_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))
+svfloat16_t svrintn_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))
+svfloat64_t svrintn_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))
+svfloat32_t svrintn_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))
+svfloat16_t svrintn_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))
+svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))
+svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))
+svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))
+svfloat64_t svrintp_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))
+svfloat32_t svrintp_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))
+svfloat16_t svrintp_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))
+svfloat64_t svrintp_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))
+svfloat32_t svrintp_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))
+svfloat16_t svrintp_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))
+svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))
+svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))
+svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))
+svfloat64_t svrintx_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))
+svfloat32_t svrintx_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))
+svfloat16_t svrintx_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))
+svfloat64_t svrintx_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))
+svfloat32_t svrintx_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))
+svfloat16_t svrintx_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))
+svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))
+svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))
+svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))
+svfloat64_t svrintz_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))
+svfloat32_t svrintz_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))
+svfloat16_t svrintz_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))
+svfloat64_t svrintz_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))
+svfloat32_t svrintz_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))
+svfloat16_t svrintz_z(svbool_t, svfloat16_t);
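+// svrsqrte/svrsqrts: reciprocal square-root estimate and refinement step
+// (FRSQRTE/FRSQRTS); svrsqrts(a, b) returns (3.0 - a*b) / 2.0.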
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))
+svfloat64_t svrsqrte(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))
+svfloat32_t svrsqrte(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))
+svfloat16_t svrsqrte(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))
+svfloat64_t svrsqrts(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))
+svfloat32_t svrsqrts(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))
+svfloat16_t svrsqrts(svfloat16_t, svfloat16_t);
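+// svscale: multiply each floating-point element by 2^n for a signed integer
+// exponent n; the _n forms broadcast a scalar exponent.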
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))
+svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))
+svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))
+svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))
+svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))
+svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))
+svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))
+svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))
+svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))
+svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))
+svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))
+svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))
+svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))
+svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))
+svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))
+svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))
+svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))
+svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))
+svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t);
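+// svsel: element-wise select; takes elements from the first vector operand
+// where the predicate is true, otherwise from the second, e.g.
+//   svfloat32_t r = svsel(pg, a, b);  // a where pg is set, else b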
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))
+svbool_t svsel(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))
+svuint8_t svsel(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))
+svuint32_t svsel(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))
+svuint64_t svsel(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))
+svuint16_t svsel(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))
+svint8_t svsel(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))
+svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))
+svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))
+svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))
+svint32_t svsel(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))
+svint64_t svsel(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))
+svint16_t svsel(svbool_t, svint16_t, svint16_t);
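+// svset2/svset3/svset4: return a copy of a vector tuple with the vector at
+// the given index replaced; the index must be a constant within the tuple
+// size.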
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))
+svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))
+svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))
+svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))
+svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))
+svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))
+svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))
+svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))
+svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))
+svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))
+svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))
+svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))
+svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))
+svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))
+svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))
+svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))
+svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))
+svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))
+svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))
+svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))
+svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))
+svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))
+svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))
+svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))
+svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))
+svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))
+svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))
+svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))
+svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))
+svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))
+svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))
+svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))
+svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
+svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t);
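+// svsplice: concatenate the active portion of the first vector with leading
+// elements of the second (SPLICE).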
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
+svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
+svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))
+svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))
+svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))
+svint8_t svsplice(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))
+svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))
+svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))
+svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))
+svint32_t svsplice(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))
+svint64_t svsplice(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))
+svint16_t svsplice(svbool_t, svint16_t, svint16_t);
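+// svsqrt: predicated floating-point square root (FSQRT).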
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))
+svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))
+svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))
+svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))
+svfloat64_t svsqrt_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))
+svfloat32_t svsqrt_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))
+svfloat16_t svsqrt_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))
+svfloat64_t svsqrt_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))
+svfloat32_t svsqrt_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))
+svfloat16_t svsqrt_z(svbool_t, svfloat16_t);
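+// svst1: contiguous store of active elements; inactive lanes leave memory
+// untouched. Illustrative loop-tail handling (not part of the generated
+// header; svwhilelt_b32 is declared elsewhere in this file):
+//   svbool_t pg = svwhilelt_b32((uint64_t)i, (uint64_t)n);
+//   svst1(pg, &dst[i], vec);  // only the active lanes are written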
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))
+void svst1(svbool_t, uint8_t *, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))
+void svst1(svbool_t, uint32_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))
+void svst1(svbool_t, uint64_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))
+void svst1(svbool_t, uint16_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))
+void svst1(svbool_t, int8_t *, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))
+void svst1(svbool_t, float64_t *, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))
+void svst1(svbool_t, float32_t *, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))
+void svst1(svbool_t, float16_t *, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))
+void svst1(svbool_t, int32_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
+void svst1(svbool_t, int64_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
+void svst1(svbool_t, int16_t *, svint16_t);
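+// Scatter stores: the vector-base forms take a vector of addresses plus an
+// optional scalar _offset (in bytes) or _index (scaled by the element
+// size); the pointer-base forms take a scalar base plus a vector of
+// per-element offsets or indices.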
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
+void svst1_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
+void svst1_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
+void svst1_scatter(svbool_t, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
+void svst1_scatter(svbool_t, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
+void svst1_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
+void svst1_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
+void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
+void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
+void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
+void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
+void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
+void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
+void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
+void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
+void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
+void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
+void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
+void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
+void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
+void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
+void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
+void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
+void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
+void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
+void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
+void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
+void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
+void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
+void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
+void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
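+// The _vnum forms address base + vnum whole vector registers, so unrolled
+// loops can step through memory without separate pointer arithmetic.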
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
+void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
+void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))
+void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))
+void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))
+void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))
+void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))
+void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))
+void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))
+void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))
+void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))
+void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
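+// Truncating stores: svst1b/svst1h/svst1w write only the low 8/16/32 bits
+// of each wider element, the store-side counterpart of the extending loads.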
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))
+void svst1b(svbool_t, int8_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))
+void svst1b(svbool_t, int8_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))
+void svst1b(svbool_t, int8_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))
+void svst1b(svbool_t, uint8_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
+void svst1b(svbool_t, uint8_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
+void svst1b(svbool_t, uint8_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
+void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
+void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
+void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
+void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
+void svst1b_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
+void svst1b_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
+void svst1b_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
+void svst1b_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))
+void svst1h(svbool_t, int16_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))
+void svst1h(svbool_t, int16_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
+void svst1h(svbool_t, uint16_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
+void svst1h(svbool_t, uint16_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
+void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
+void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
+void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
+void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
+void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
+void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
+void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
+void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
+void svst1h_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
+void svst1h_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
+void svst1h_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
+void svst1h_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
+void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
+void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
+void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
+void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
+void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
+void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))
+void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))
+void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
+void svst1w(svbool_t, int32_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
+void svst1w(svbool_t, uint32_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
+void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
+void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
+void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
+void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
+void svst1w_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
+void svst1w_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
+void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
+void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
+void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
+void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
+void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
+void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
+void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
+void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
+void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
+void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))
+void svst2(svbool_t, uint8_t *, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))
+void svst2(svbool_t, uint32_t *, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))
+void svst2(svbool_t, uint64_t *, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))
+void svst2(svbool_t, uint16_t *, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))
+void svst2(svbool_t, int8_t *, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))
+void svst2(svbool_t, float64_t *, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))
+void svst2(svbool_t, float32_t *, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))
+void svst2(svbool_t, float16_t *, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))
+void svst2(svbool_t, int32_t *, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))
+void svst2(svbool_t, int64_t *, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))
+void svst2(svbool_t, int16_t *, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))
+void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))
+void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))
+void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))
+void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))
+void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))
+void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))
+void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))
+void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))
+void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))
+void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))
+void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))
+void svst3(svbool_t, uint8_t *, svuint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))
+void svst3(svbool_t, uint32_t *, svuint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))
+void svst3(svbool_t, uint64_t *, svuint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))
+void svst3(svbool_t, uint16_t *, svuint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))
+void svst3(svbool_t, int8_t *, svint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))
+void svst3(svbool_t, float64_t *, svfloat64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))
+void svst3(svbool_t, float32_t *, svfloat32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))
+void svst3(svbool_t, float16_t *, svfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))
+void svst3(svbool_t, int32_t *, svint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))
+void svst3(svbool_t, int64_t *, svint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))
+void svst3(svbool_t, int16_t *, svint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))
+void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))
+void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))
+void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))
+void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))
+void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64)))
+void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32)))
+void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16)))
+void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32)))
+void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64)))
+void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16)))
+void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8)))
+void svst4(svbool_t, uint8_t *, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32)))
+void svst4(svbool_t, uint32_t *, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64)))
+void svst4(svbool_t, uint64_t *, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16)))
+void svst4(svbool_t, uint16_t *, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8)))
+void svst4(svbool_t, int8_t *, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64)))
+void svst4(svbool_t, float64_t *, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32)))
+void svst4(svbool_t, float32_t *, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16)))
+void svst4(svbool_t, float16_t *, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32)))
+void svst4(svbool_t, int32_t *, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64)))
+void svst4(svbool_t, int64_t *, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16)))
+void svst4(svbool_t, int16_t *, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8)))
+void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32)))
+void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64)))
+void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16)))
+void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8)))
+void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64)))
+void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32)))
+void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16)))
+void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32)))
+void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64)))
+void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16)))
+void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8)))
+void svstnt1(svbool_t, uint8_t *, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32)))
+void svstnt1(svbool_t, uint32_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64)))
+void svstnt1(svbool_t, uint64_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16)))
+void svstnt1(svbool_t, uint16_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8)))
+void svstnt1(svbool_t, int8_t *, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64)))
+void svstnt1(svbool_t, float64_t *, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32)))
+void svstnt1(svbool_t, float32_t *, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16)))
+void svstnt1(svbool_t, float16_t *, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32)))
+void svstnt1(svbool_t, int32_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64)))
+void svstnt1(svbool_t, int64_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16)))
+void svstnt1(svbool_t, int16_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8)))
+void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32)))
+void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64)))
+void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16)))
+void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8)))
+void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64)))
+void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32)))
+void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16)))
+void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32)))
+void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64)))
+void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16)))
+void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m)))
+svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m)))
+svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m)))
+svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x)))
+svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x)))
+svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x)))
+svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z)))
+svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z)))
+svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z)))
+svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m)))
+svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m)))
+svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m)))
+svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m)))
+svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m)))
+svint8_t svsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m)))
+svint32_t svsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m)))
+svint64_t svsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m)))
+svint16_t svsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x)))
+svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x)))
+svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x)))
+svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x)))
+svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x)))
+svint8_t svsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x)))
+svint32_t svsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x)))
+svint64_t svsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x)))
+svint16_t svsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z)))
+svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z)))
+svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z)))
+svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z)))
+svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z)))
+svint8_t svsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z)))
+svint32_t svsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z)))
+svint64_t svsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z)))
+svint16_t svsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m)))
+svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m)))
+svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m)))
+svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x)))
+svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x)))
+svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x)))
+svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z)))
+svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z)))
+svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z)))
+svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m)))
+svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m)))
+svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m)))
+svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m)))
+svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m)))
+svint8_t svsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m)))
+svint32_t svsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m)))
+svint64_t svsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m)))
+svint16_t svsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x)))
+svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x)))
+svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x)))
+svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x)))
+svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x)))
+svint8_t svsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x)))
+svint32_t svsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x)))
+svint64_t svsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x)))
+svint16_t svsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z)))
+svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z)))
+svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z)))
+svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z)))
+svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z)))
+svint8_t svsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z)))
+svint32_t svsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z)))
+svint64_t svsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z)))
+svint16_t svsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m)))
+svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m)))
+svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m)))
+svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x)))
+svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x)))
+svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x)))
+svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z)))
+svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z)))
+svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z)))
+svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m)))
+svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m)))
+svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m)))
+svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m)))
+svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m)))
+svint8_t svsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m)))
+svint32_t svsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m)))
+svint64_t svsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m)))
+svint16_t svsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x)))
+svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x)))
+svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x)))
+svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x)))
+svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x)))
+svint8_t svsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x)))
+svint32_t svsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x)))
+svint64_t svsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x)))
+svint16_t svsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z)))
+svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z)))
+svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z)))
+svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z)))
+svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z)))
+svint8_t svsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z)))
+svint32_t svsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z)))
+svint64_t svsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z)))
+svint16_t svsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m)))
+svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m)))
+svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m)))
+svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x)))
+svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x)))
+svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x)))
+svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z)))
+svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z)))
+svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z)))
+svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m)))
+svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m)))
+svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m)))
+svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m)))
+svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m)))
+svint8_t svsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m)))
+svint32_t svsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m)))
+svint64_t svsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m)))
+svint16_t svsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x)))
+svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x)))
+svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x)))
+svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x)))
+svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x)))
+svint8_t svsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x)))
+svint32_t svsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x)))
+svint64_t svsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x)))
+svint16_t svsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z)))
+svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z)))
+svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z)))
+svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z)))
+svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z)))
+svint8_t svsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z)))
+svint32_t svsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z)))
+svint64_t svsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z)))
+svint16_t svsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8)))
+svuint8_t svtbl(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32)))
+svuint32_t svtbl(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64)))
+svuint64_t svtbl(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16)))
+svuint16_t svtbl(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8)))
+svint8_t svtbl(svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64)))
+svfloat64_t svtbl(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32)))
+svfloat32_t svtbl(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16)))
+svfloat16_t svtbl(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32)))
+svint32_t svtbl(svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))
+svint64_t svtbl(svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))
+svint16_t svtbl(svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
+svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
+svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
+svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))
+svuint8_t svtrn1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))
+svuint32_t svtrn1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64)))
+svuint64_t svtrn1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16)))
+svuint16_t svtrn1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8)))
+svint8_t svtrn1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64)))
+svfloat64_t svtrn1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32)))
+svfloat32_t svtrn1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16)))
+svfloat16_t svtrn1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32)))
+svint32_t svtrn1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64)))
+svint64_t svtrn1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16)))
+svint16_t svtrn1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8)))
+svuint8_t svtrn2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32)))
+svuint32_t svtrn2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64)))
+svuint64_t svtrn2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16)))
+svuint16_t svtrn2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8)))
+svint8_t svtrn2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64)))
+svfloat64_t svtrn2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32)))
+svfloat32_t svtrn2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16)))
+svfloat16_t svtrn2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32)))
+svint32_t svtrn2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))
+svint64_t svtrn2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))
+svint16_t svtrn2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
+svfloat64_t svtsmul(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
+svfloat32_t svtsmul(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
+svfloat16_t svtsmul(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
+svfloat64_t svtssel(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
+svfloat32_t svtssel(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
+svfloat16_t svtssel(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
+svbool_t svunpkhi(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
+svint32_t svunpkhi(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))
+svint64_t svunpkhi(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))
+svint16_t svunpkhi(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))
+svuint32_t svunpkhi(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))
+svuint64_t svunpkhi(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))
+svuint16_t svunpkhi(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))
+svbool_t svunpklo(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))
+svint32_t svunpklo(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))
+svint64_t svunpklo(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))
+svint16_t svunpklo(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))
+svuint32_t svunpklo(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))
+svuint64_t svunpklo(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))
+svuint16_t svunpklo(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))
+svuint8_t svuzp1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))
+svuint32_t svuzp1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))
+svuint64_t svuzp1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))
+svuint16_t svuzp1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))
+svint8_t svuzp1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))
+svfloat64_t svuzp1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))
+svfloat32_t svuzp1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))
+svfloat16_t svuzp1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))
+svint32_t svuzp1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))
+svint64_t svuzp1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))
+svint16_t svuzp1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))
+svuint8_t svuzp2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))
+svuint32_t svuzp2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))
+svuint64_t svuzp2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))
+svuint16_t svuzp2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))
+svint8_t svuzp2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))
+svfloat64_t svuzp2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))
+svfloat32_t svuzp2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))
+svfloat16_t svuzp2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))
+svint32_t svuzp2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))
+svint64_t svuzp2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))
+svint16_t svuzp2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))
+svbool_t svwhilele_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))
+svbool_t svwhilele_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))
+svbool_t svwhilele_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))
+svbool_t svwhilele_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))
+svbool_t svwhilele_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))
+svbool_t svwhilele_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))
+svbool_t svwhilele_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))
+svbool_t svwhilele_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))
+svbool_t svwhilele_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))
+svbool_t svwhilele_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))
+svbool_t svwhilele_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))
+svbool_t svwhilele_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))
+svbool_t svwhilele_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))
+svbool_t svwhilele_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))
+svbool_t svwhilele_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))
+svbool_t svwhilele_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))
+svbool_t svwhilelt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))
+svbool_t svwhilelt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))
+svbool_t svwhilelt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))
+svbool_t svwhilelt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))
+svbool_t svwhilelt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))
+svbool_t svwhilelt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))
+svbool_t svwhilelt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))
+svbool_t svwhilelt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))
+svbool_t svwhilelt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))
+svbool_t svwhilelt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))
+svbool_t svwhilelt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))
+svbool_t svwhilelt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))
+svbool_t svwhilelt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))
+svbool_t svwhilelt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))
+svbool_t svwhilelt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))
+svbool_t svwhilelt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))
+svuint8_t svzip1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))
+svuint32_t svzip1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))
+svuint64_t svzip1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))
+svuint16_t svzip1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))
+svint8_t svzip1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))
+svfloat64_t svzip1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))
+svfloat32_t svzip1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))
+svfloat16_t svzip1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))
+svint32_t svzip1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))
+svint64_t svzip1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))
+svint16_t svzip1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))
+svuint8_t svzip2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))
+svuint32_t svzip2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))
+svuint64_t svzip2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))
+svuint16_t svzip2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))
+svint8_t svzip2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))
+svfloat64_t svzip2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))
+svfloat32_t svzip2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))
+svfloat16_t svzip2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))
+svint32_t svzip2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))
+svint64_t svzip2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))
+svint16_t svzip2(svint16_t, svint16_t);
+
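+// A minimal usage sketch of the type-generic (__aio) overloads above,
+// assuming an SVE-enabled target and the svld1/svst1 overloads declared
+// earlier in this header: predicated float32 subtraction over a buffer of
+// arbitrary length, with svwhilelt_b32 masking the tail iteration.
+//
+//   void sub_f32(float32_t *dst, const float32_t *a, const float32_t *b,
+//                int64_t n) {
+//     for (int64_t i = 0; i < n; i += svcntw()) {
+//       svbool_t pg = svwhilelt_b32(i, n);        // active while i+lane < n
+//       svfloat32_t va = svld1(pg, a + i);        // predicated loads
+//       svfloat32_t vb = svld1(pg, b + i);
+//       svst1(pg, dst + i, svsub_x(pg, va, vb));  // resolves to svsub_f32_x
+//     }
+//   }
+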
+#if defined(__ARM_FEATURE_SVE2_BITPERM)
+#define svbdep_n_u8(...) __builtin_sve_svbdep_n_u8(__VA_ARGS__)
+#define svbdep_n_u32(...) __builtin_sve_svbdep_n_u32(__VA_ARGS__)
+#define svbdep_n_u64(...) __builtin_sve_svbdep_n_u64(__VA_ARGS__)
+#define svbdep_n_u16(...) __builtin_sve_svbdep_n_u16(__VA_ARGS__)
+#define svbdep_u8(...) __builtin_sve_svbdep_u8(__VA_ARGS__)
+#define svbdep_u32(...) __builtin_sve_svbdep_u32(__VA_ARGS__)
+#define svbdep_u64(...) __builtin_sve_svbdep_u64(__VA_ARGS__)
+#define svbdep_u16(...) __builtin_sve_svbdep_u16(__VA_ARGS__)
+#define svbext_n_u8(...) __builtin_sve_svbext_n_u8(__VA_ARGS__)
+#define svbext_n_u32(...) __builtin_sve_svbext_n_u32(__VA_ARGS__)
+#define svbext_n_u64(...) __builtin_sve_svbext_n_u64(__VA_ARGS__)
+#define svbext_n_u16(...) __builtin_sve_svbext_n_u16(__VA_ARGS__)
+#define svbext_u8(...) __builtin_sve_svbext_u8(__VA_ARGS__)
+#define svbext_u32(...) __builtin_sve_svbext_u32(__VA_ARGS__)
+#define svbext_u64(...) __builtin_sve_svbext_u64(__VA_ARGS__)
+#define svbext_u16(...) __builtin_sve_svbext_u16(__VA_ARGS__)
+#define svbgrp_n_u8(...) __builtin_sve_svbgrp_n_u8(__VA_ARGS__)
+#define svbgrp_n_u32(...) __builtin_sve_svbgrp_n_u32(__VA_ARGS__)
+#define svbgrp_n_u64(...) __builtin_sve_svbgrp_n_u64(__VA_ARGS__)
+#define svbgrp_n_u16(...) __builtin_sve_svbgrp_n_u16(__VA_ARGS__)
+#define svbgrp_u8(...) __builtin_sve_svbgrp_u8(__VA_ARGS__)
+#define svbgrp_u32(...) __builtin_sve_svbgrp_u32(__VA_ARGS__)
+#define svbgrp_u64(...) __builtin_sve_svbgrp_u64(__VA_ARGS__)
+#define svbgrp_u16(...) __builtin_sve_svbgrp_u16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
+svuint8_t svbdep(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
+svuint32_t svbdep(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
+svuint64_t svbdep(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
+svuint16_t svbdep(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
+svuint8_t svbdep(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
+svuint32_t svbdep(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
+svuint64_t svbdep(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
+svuint16_t svbdep(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
+svuint8_t svbext(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
+svuint32_t svbext(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
+svuint64_t svbext(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
+svuint16_t svbext(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
+svuint8_t svbext(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
+svuint32_t svbext(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
+svuint64_t svbext(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
+svuint16_t svbext(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
+svuint8_t svbgrp(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
+svuint32_t svbgrp(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
+svuint64_t svbgrp(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
+svuint16_t svbgrp(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
+svuint8_t svbgrp(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
+svuint32_t svbgrp(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
+svuint64_t svbgrp(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
+svuint16_t svbgrp(svuint16_t, svuint16_t);
+#endif  // __ARM_FEATURE_SVE2_BITPERM
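+
+// A minimal usage sketch for the bit-permute overloads above, assuming a
+// target compiled with +sve2-bitperm (so __ARM_FEATURE_SVE2_BITPERM is
+// defined): svbext packs the bits selected by the mask into the low bits
+// of each element, and svbdep performs the inverse scatter.
+//
+//   svuint32_t nibble_lows(svuint32_t data) {
+//     return svbext(data, 0x0F0F0F0Fu);   // resolves to svbext_n_u32
+//   }
+//   svuint32_t nibble_restore(svuint32_t packed) {
+//     return svbdep(packed, 0x0F0F0F0Fu); // resolves to svbdep_n_u32
+//   }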
+
+#if defined(__ARM_FEATURE_SVE2)
+#define svaba_n_s8(...) __builtin_sve_svaba_n_s8(__VA_ARGS__)
+#define svaba_n_s32(...) __builtin_sve_svaba_n_s32(__VA_ARGS__)
+#define svaba_n_s64(...) __builtin_sve_svaba_n_s64(__VA_ARGS__)
+#define svaba_n_s16(...) __builtin_sve_svaba_n_s16(__VA_ARGS__)
+#define svaba_n_u8(...) __builtin_sve_svaba_n_u8(__VA_ARGS__)
+#define svaba_n_u32(...) __builtin_sve_svaba_n_u32(__VA_ARGS__)
+#define svaba_n_u64(...) __builtin_sve_svaba_n_u64(__VA_ARGS__)
+#define svaba_n_u16(...) __builtin_sve_svaba_n_u16(__VA_ARGS__)
+#define svaba_s8(...) __builtin_sve_svaba_s8(__VA_ARGS__)
+#define svaba_s32(...) __builtin_sve_svaba_s32(__VA_ARGS__)
+#define svaba_s64(...) __builtin_sve_svaba_s64(__VA_ARGS__)
+#define svaba_s16(...) __builtin_sve_svaba_s16(__VA_ARGS__)
+#define svaba_u8(...) __builtin_sve_svaba_u8(__VA_ARGS__)
+#define svaba_u32(...) __builtin_sve_svaba_u32(__VA_ARGS__)
+#define svaba_u64(...) __builtin_sve_svaba_u64(__VA_ARGS__)
+#define svaba_u16(...) __builtin_sve_svaba_u16(__VA_ARGS__)
+#define svabalb_n_s32(...) __builtin_sve_svabalb_n_s32(__VA_ARGS__)
+#define svabalb_n_s64(...) __builtin_sve_svabalb_n_s64(__VA_ARGS__)
+#define svabalb_n_s16(...) __builtin_sve_svabalb_n_s16(__VA_ARGS__)
+#define svabalb_n_u32(...) __builtin_sve_svabalb_n_u32(__VA_ARGS__)
+#define svabalb_n_u64(...) __builtin_sve_svabalb_n_u64(__VA_ARGS__)
+#define svabalb_n_u16(...) __builtin_sve_svabalb_n_u16(__VA_ARGS__)
+#define svabalb_s32(...) __builtin_sve_svabalb_s32(__VA_ARGS__)
+#define svabalb_s64(...) __builtin_sve_svabalb_s64(__VA_ARGS__)
+#define svabalb_s16(...) __builtin_sve_svabalb_s16(__VA_ARGS__)
+#define svabalb_u32(...) __builtin_sve_svabalb_u32(__VA_ARGS__)
+#define svabalb_u64(...) __builtin_sve_svabalb_u64(__VA_ARGS__)
+#define svabalb_u16(...) __builtin_sve_svabalb_u16(__VA_ARGS__)
+#define svabalt_n_s32(...) __builtin_sve_svabalt_n_s32(__VA_ARGS__)
+#define svabalt_n_s64(...) __builtin_sve_svabalt_n_s64(__VA_ARGS__)
+#define svabalt_n_s16(...) __builtin_sve_svabalt_n_s16(__VA_ARGS__)
+#define svabalt_n_u32(...) __builtin_sve_svabalt_n_u32(__VA_ARGS__)
+#define svabalt_n_u64(...) __builtin_sve_svabalt_n_u64(__VA_ARGS__)
+#define svabalt_n_u16(...) __builtin_sve_svabalt_n_u16(__VA_ARGS__)
+#define svabalt_s32(...) __builtin_sve_svabalt_s32(__VA_ARGS__)
+#define svabalt_s64(...) __builtin_sve_svabalt_s64(__VA_ARGS__)
+#define svabalt_s16(...) __builtin_sve_svabalt_s16(__VA_ARGS__)
+#define svabalt_u32(...) __builtin_sve_svabalt_u32(__VA_ARGS__)
+#define svabalt_u64(...) __builtin_sve_svabalt_u64(__VA_ARGS__)
+#define svabalt_u16(...) __builtin_sve_svabalt_u16(__VA_ARGS__)
+#define svabdlb_n_s32(...) __builtin_sve_svabdlb_n_s32(__VA_ARGS__)
+#define svabdlb_n_s64(...) __builtin_sve_svabdlb_n_s64(__VA_ARGS__)
+#define svabdlb_n_s16(...) __builtin_sve_svabdlb_n_s16(__VA_ARGS__)
+#define svabdlb_n_u32(...) __builtin_sve_svabdlb_n_u32(__VA_ARGS__)
+#define svabdlb_n_u64(...) __builtin_sve_svabdlb_n_u64(__VA_ARGS__)
+#define svabdlb_n_u16(...) __builtin_sve_svabdlb_n_u16(__VA_ARGS__)
+#define svabdlb_s32(...) __builtin_sve_svabdlb_s32(__VA_ARGS__)
+#define svabdlb_s64(...) __builtin_sve_svabdlb_s64(__VA_ARGS__)
+#define svabdlb_s16(...) __builtin_sve_svabdlb_s16(__VA_ARGS__)
+#define svabdlb_u32(...) __builtin_sve_svabdlb_u32(__VA_ARGS__)
+#define svabdlb_u64(...) __builtin_sve_svabdlb_u64(__VA_ARGS__)
+#define svabdlb_u16(...) __builtin_sve_svabdlb_u16(__VA_ARGS__)
+#define svabdlt_n_s32(...) __builtin_sve_svabdlt_n_s32(__VA_ARGS__)
+#define svabdlt_n_s64(...) __builtin_sve_svabdlt_n_s64(__VA_ARGS__)
+#define svabdlt_n_s16(...) __builtin_sve_svabdlt_n_s16(__VA_ARGS__)
+#define svabdlt_n_u32(...) __builtin_sve_svabdlt_n_u32(__VA_ARGS__)
+#define svabdlt_n_u64(...) __builtin_sve_svabdlt_n_u64(__VA_ARGS__)
+#define svabdlt_n_u16(...) __builtin_sve_svabdlt_n_u16(__VA_ARGS__)
+#define svabdlt_s32(...) __builtin_sve_svabdlt_s32(__VA_ARGS__)
+#define svabdlt_s64(...) __builtin_sve_svabdlt_s64(__VA_ARGS__)
+#define svabdlt_s16(...) __builtin_sve_svabdlt_s16(__VA_ARGS__)
+#define svabdlt_u32(...) __builtin_sve_svabdlt_u32(__VA_ARGS__)
+#define svabdlt_u64(...) __builtin_sve_svabdlt_u64(__VA_ARGS__)
+#define svabdlt_u16(...) __builtin_sve_svabdlt_u16(__VA_ARGS__)
+#define svadalp_s32_m(...) __builtin_sve_svadalp_s32_m(__VA_ARGS__)
+#define svadalp_s64_m(...) __builtin_sve_svadalp_s64_m(__VA_ARGS__)
+#define svadalp_s16_m(...) __builtin_sve_svadalp_s16_m(__VA_ARGS__)
+#define svadalp_s32_x(...) __builtin_sve_svadalp_s32_x(__VA_ARGS__)
+#define svadalp_s64_x(...) __builtin_sve_svadalp_s64_x(__VA_ARGS__)
+#define svadalp_s16_x(...) __builtin_sve_svadalp_s16_x(__VA_ARGS__)
+#define svadalp_s32_z(...) __builtin_sve_svadalp_s32_z(__VA_ARGS__)
+#define svadalp_s64_z(...) __builtin_sve_svadalp_s64_z(__VA_ARGS__)
+#define svadalp_s16_z(...) __builtin_sve_svadalp_s16_z(__VA_ARGS__)
+#define svadalp_u32_m(...) __builtin_sve_svadalp_u32_m(__VA_ARGS__)
+#define svadalp_u64_m(...) __builtin_sve_svadalp_u64_m(__VA_ARGS__)
+#define svadalp_u16_m(...) __builtin_sve_svadalp_u16_m(__VA_ARGS__)
+#define svadalp_u32_x(...) __builtin_sve_svadalp_u32_x(__VA_ARGS__)
+#define svadalp_u64_x(...) __builtin_sve_svadalp_u64_x(__VA_ARGS__)
+#define svadalp_u16_x(...) __builtin_sve_svadalp_u16_x(__VA_ARGS__)
+#define svadalp_u32_z(...) __builtin_sve_svadalp_u32_z(__VA_ARGS__)
+#define svadalp_u64_z(...) __builtin_sve_svadalp_u64_z(__VA_ARGS__)
+#define svadalp_u16_z(...) __builtin_sve_svadalp_u16_z(__VA_ARGS__)
+#define svadclb_n_u32(...) __builtin_sve_svadclb_n_u32(__VA_ARGS__)
+#define svadclb_n_u64(...) __builtin_sve_svadclb_n_u64(__VA_ARGS__)
+#define svadclb_u32(...) __builtin_sve_svadclb_u32(__VA_ARGS__)
+#define svadclb_u64(...) __builtin_sve_svadclb_u64(__VA_ARGS__)
+#define svadclt_n_u32(...) __builtin_sve_svadclt_n_u32(__VA_ARGS__)
+#define svadclt_n_u64(...) __builtin_sve_svadclt_n_u64(__VA_ARGS__)
+#define svadclt_u32(...) __builtin_sve_svadclt_u32(__VA_ARGS__)
+#define svadclt_u64(...) __builtin_sve_svadclt_u64(__VA_ARGS__)
+#define svaddhnb_n_u32(...) __builtin_sve_svaddhnb_n_u32(__VA_ARGS__)
+#define svaddhnb_n_u64(...) __builtin_sve_svaddhnb_n_u64(__VA_ARGS__)
+#define svaddhnb_n_u16(...) __builtin_sve_svaddhnb_n_u16(__VA_ARGS__)
+#define svaddhnb_n_s32(...) __builtin_sve_svaddhnb_n_s32(__VA_ARGS__)
+#define svaddhnb_n_s64(...) __builtin_sve_svaddhnb_n_s64(__VA_ARGS__)
+#define svaddhnb_n_s16(...) __builtin_sve_svaddhnb_n_s16(__VA_ARGS__)
+#define svaddhnb_u32(...) __builtin_sve_svaddhnb_u32(__VA_ARGS__)
+#define svaddhnb_u64(...) __builtin_sve_svaddhnb_u64(__VA_ARGS__)
+#define svaddhnb_u16(...) __builtin_sve_svaddhnb_u16(__VA_ARGS__)
+#define svaddhnb_s32(...) __builtin_sve_svaddhnb_s32(__VA_ARGS__)
+#define svaddhnb_s64(...) __builtin_sve_svaddhnb_s64(__VA_ARGS__)
+#define svaddhnb_s16(...) __builtin_sve_svaddhnb_s16(__VA_ARGS__)
+#define svaddhnt_n_u32(...) __builtin_sve_svaddhnt_n_u32(__VA_ARGS__)
+#define svaddhnt_n_u64(...) __builtin_sve_svaddhnt_n_u64(__VA_ARGS__)
+#define svaddhnt_n_u16(...) __builtin_sve_svaddhnt_n_u16(__VA_ARGS__)
+#define svaddhnt_n_s32(...) __builtin_sve_svaddhnt_n_s32(__VA_ARGS__)
+#define svaddhnt_n_s64(...) __builtin_sve_svaddhnt_n_s64(__VA_ARGS__)
+#define svaddhnt_n_s16(...) __builtin_sve_svaddhnt_n_s16(__VA_ARGS__)
+#define svaddhnt_u32(...) __builtin_sve_svaddhnt_u32(__VA_ARGS__)
+#define svaddhnt_u64(...) __builtin_sve_svaddhnt_u64(__VA_ARGS__)
+#define svaddhnt_u16(...) __builtin_sve_svaddhnt_u16(__VA_ARGS__)
+#define svaddhnt_s32(...) __builtin_sve_svaddhnt_s32(__VA_ARGS__)
+#define svaddhnt_s64(...) __builtin_sve_svaddhnt_s64(__VA_ARGS__)
+#define svaddhnt_s16(...) __builtin_sve_svaddhnt_s16(__VA_ARGS__)
+#define svaddlb_n_s32(...) __builtin_sve_svaddlb_n_s32(__VA_ARGS__)
+#define svaddlb_n_s64(...) __builtin_sve_svaddlb_n_s64(__VA_ARGS__)
+#define svaddlb_n_s16(...) __builtin_sve_svaddlb_n_s16(__VA_ARGS__)
+#define svaddlb_n_u32(...) __builtin_sve_svaddlb_n_u32(__VA_ARGS__)
+#define svaddlb_n_u64(...) __builtin_sve_svaddlb_n_u64(__VA_ARGS__)
+#define svaddlb_n_u16(...) __builtin_sve_svaddlb_n_u16(__VA_ARGS__)
+#define svaddlb_s32(...) __builtin_sve_svaddlb_s32(__VA_ARGS__)
+#define svaddlb_s64(...) __builtin_sve_svaddlb_s64(__VA_ARGS__)
+#define svaddlb_s16(...) __builtin_sve_svaddlb_s16(__VA_ARGS__)
+#define svaddlb_u32(...) __builtin_sve_svaddlb_u32(__VA_ARGS__)
+#define svaddlb_u64(...) __builtin_sve_svaddlb_u64(__VA_ARGS__)
+#define svaddlb_u16(...) __builtin_sve_svaddlb_u16(__VA_ARGS__)
+#define svaddlbt_n_s32(...) __builtin_sve_svaddlbt_n_s32(__VA_ARGS__)
+#define svaddlbt_n_s64(...) __builtin_sve_svaddlbt_n_s64(__VA_ARGS__)
+#define svaddlbt_n_s16(...) __builtin_sve_svaddlbt_n_s16(__VA_ARGS__)
+#define svaddlbt_s32(...) __builtin_sve_svaddlbt_s32(__VA_ARGS__)
+#define svaddlbt_s64(...) __builtin_sve_svaddlbt_s64(__VA_ARGS__)
+#define svaddlbt_s16(...) __builtin_sve_svaddlbt_s16(__VA_ARGS__)
+#define svaddlt_n_s32(...) __builtin_sve_svaddlt_n_s32(__VA_ARGS__)
+#define svaddlt_n_s64(...) __builtin_sve_svaddlt_n_s64(__VA_ARGS__)
+#define svaddlt_n_s16(...) __builtin_sve_svaddlt_n_s16(__VA_ARGS__)
+#define svaddlt_n_u32(...) __builtin_sve_svaddlt_n_u32(__VA_ARGS__)
+#define svaddlt_n_u64(...) __builtin_sve_svaddlt_n_u64(__VA_ARGS__)
+#define svaddlt_n_u16(...) __builtin_sve_svaddlt_n_u16(__VA_ARGS__)
+#define svaddlt_s32(...) __builtin_sve_svaddlt_s32(__VA_ARGS__)
+#define svaddlt_s64(...) __builtin_sve_svaddlt_s64(__VA_ARGS__)
+#define svaddlt_s16(...) __builtin_sve_svaddlt_s16(__VA_ARGS__)
+#define svaddlt_u32(...) __builtin_sve_svaddlt_u32(__VA_ARGS__)
+#define svaddlt_u64(...) __builtin_sve_svaddlt_u64(__VA_ARGS__)
+#define svaddlt_u16(...) __builtin_sve_svaddlt_u16(__VA_ARGS__)
+#define svaddp_f64_m(...) __builtin_sve_svaddp_f64_m(__VA_ARGS__)
+#define svaddp_f32_m(...) __builtin_sve_svaddp_f32_m(__VA_ARGS__)
+#define svaddp_f16_m(...) __builtin_sve_svaddp_f16_m(__VA_ARGS__)
+#define svaddp_f64_x(...) __builtin_sve_svaddp_f64_x(__VA_ARGS__)
+#define svaddp_f32_x(...) __builtin_sve_svaddp_f32_x(__VA_ARGS__)
+#define svaddp_f16_x(...) __builtin_sve_svaddp_f16_x(__VA_ARGS__)
+#define svaddp_u8_m(...) __builtin_sve_svaddp_u8_m(__VA_ARGS__)
+#define svaddp_u32_m(...) __builtin_sve_svaddp_u32_m(__VA_ARGS__)
+#define svaddp_u64_m(...) __builtin_sve_svaddp_u64_m(__VA_ARGS__)
+#define svaddp_u16_m(...) __builtin_sve_svaddp_u16_m(__VA_ARGS__)
+#define svaddp_s8_m(...) __builtin_sve_svaddp_s8_m(__VA_ARGS__)
+#define svaddp_s32_m(...) __builtin_sve_svaddp_s32_m(__VA_ARGS__)
+#define svaddp_s64_m(...) __builtin_sve_svaddp_s64_m(__VA_ARGS__)
+#define svaddp_s16_m(...) __builtin_sve_svaddp_s16_m(__VA_ARGS__)
+#define svaddp_u8_x(...) __builtin_sve_svaddp_u8_x(__VA_ARGS__)
+#define svaddp_u32_x(...) __builtin_sve_svaddp_u32_x(__VA_ARGS__)
+#define svaddp_u64_x(...) __builtin_sve_svaddp_u64_x(__VA_ARGS__)
+#define svaddp_u16_x(...) __builtin_sve_svaddp_u16_x(__VA_ARGS__)
+#define svaddp_s8_x(...) __builtin_sve_svaddp_s8_x(__VA_ARGS__)
+#define svaddp_s32_x(...) __builtin_sve_svaddp_s32_x(__VA_ARGS__)
+#define svaddp_s64_x(...) __builtin_sve_svaddp_s64_x(__VA_ARGS__)
+#define svaddp_s16_x(...) __builtin_sve_svaddp_s16_x(__VA_ARGS__)
+#define svaddwb_n_s32(...) __builtin_sve_svaddwb_n_s32(__VA_ARGS__)
+#define svaddwb_n_s64(...) __builtin_sve_svaddwb_n_s64(__VA_ARGS__)
+#define svaddwb_n_s16(...) __builtin_sve_svaddwb_n_s16(__VA_ARGS__)
+#define svaddwb_n_u32(...) __builtin_sve_svaddwb_n_u32(__VA_ARGS__)
+#define svaddwb_n_u64(...) __builtin_sve_svaddwb_n_u64(__VA_ARGS__)
+#define svaddwb_n_u16(...) __builtin_sve_svaddwb_n_u16(__VA_ARGS__)
+#define svaddwb_s32(...) __builtin_sve_svaddwb_s32(__VA_ARGS__)
+#define svaddwb_s64(...) __builtin_sve_svaddwb_s64(__VA_ARGS__)
+#define svaddwb_s16(...) __builtin_sve_svaddwb_s16(__VA_ARGS__)
+#define svaddwb_u32(...) __builtin_sve_svaddwb_u32(__VA_ARGS__)
+#define svaddwb_u64(...) __builtin_sve_svaddwb_u64(__VA_ARGS__)
+#define svaddwb_u16(...) __builtin_sve_svaddwb_u16(__VA_ARGS__)
+#define svaddwt_n_s32(...) __builtin_sve_svaddwt_n_s32(__VA_ARGS__)
+#define svaddwt_n_s64(...) __builtin_sve_svaddwt_n_s64(__VA_ARGS__)
+#define svaddwt_n_s16(...) __builtin_sve_svaddwt_n_s16(__VA_ARGS__)
+#define svaddwt_n_u32(...) __builtin_sve_svaddwt_n_u32(__VA_ARGS__)
+#define svaddwt_n_u64(...) __builtin_sve_svaddwt_n_u64(__VA_ARGS__)
+#define svaddwt_n_u16(...) __builtin_sve_svaddwt_n_u16(__VA_ARGS__)
+#define svaddwt_s32(...) __builtin_sve_svaddwt_s32(__VA_ARGS__)
+#define svaddwt_s64(...) __builtin_sve_svaddwt_s64(__VA_ARGS__)
+#define svaddwt_s16(...) __builtin_sve_svaddwt_s16(__VA_ARGS__)
+#define svaddwt_u32(...) __builtin_sve_svaddwt_u32(__VA_ARGS__)
+#define svaddwt_u64(...) __builtin_sve_svaddwt_u64(__VA_ARGS__)
+#define svaddwt_u16(...) __builtin_sve_svaddwt_u16(__VA_ARGS__)
+#define svbcax_n_u8(...) __builtin_sve_svbcax_n_u8(__VA_ARGS__)
+#define svbcax_n_u32(...) __builtin_sve_svbcax_n_u32(__VA_ARGS__)
+#define svbcax_n_u64(...) __builtin_sve_svbcax_n_u64(__VA_ARGS__)
+#define svbcax_n_u16(...) __builtin_sve_svbcax_n_u16(__VA_ARGS__)
+#define svbcax_n_s8(...) __builtin_sve_svbcax_n_s8(__VA_ARGS__)
+#define svbcax_n_s32(...) __builtin_sve_svbcax_n_s32(__VA_ARGS__)
+#define svbcax_n_s64(...) __builtin_sve_svbcax_n_s64(__VA_ARGS__)
+#define svbcax_n_s16(...) __builtin_sve_svbcax_n_s16(__VA_ARGS__)
+#define svbcax_u8(...) __builtin_sve_svbcax_u8(__VA_ARGS__)
+#define svbcax_u32(...) __builtin_sve_svbcax_u32(__VA_ARGS__)
+#define svbcax_u64(...) __builtin_sve_svbcax_u64(__VA_ARGS__)
+#define svbcax_u16(...) __builtin_sve_svbcax_u16(__VA_ARGS__)
+#define svbcax_s8(...) __builtin_sve_svbcax_s8(__VA_ARGS__)
+#define svbcax_s32(...) __builtin_sve_svbcax_s32(__VA_ARGS__)
+#define svbcax_s64(...) __builtin_sve_svbcax_s64(__VA_ARGS__)
+#define svbcax_s16(...) __builtin_sve_svbcax_s16(__VA_ARGS__)
+#define svbsl1n_n_u8(...) __builtin_sve_svbsl1n_n_u8(__VA_ARGS__)
+#define svbsl1n_n_u32(...) __builtin_sve_svbsl1n_n_u32(__VA_ARGS__)
+#define svbsl1n_n_u64(...) __builtin_sve_svbsl1n_n_u64(__VA_ARGS__)
+#define svbsl1n_n_u16(...) __builtin_sve_svbsl1n_n_u16(__VA_ARGS__)
+#define svbsl1n_n_s8(...) __builtin_sve_svbsl1n_n_s8(__VA_ARGS__)
+#define svbsl1n_n_s32(...) __builtin_sve_svbsl1n_n_s32(__VA_ARGS__)
+#define svbsl1n_n_s64(...) __builtin_sve_svbsl1n_n_s64(__VA_ARGS__)
+#define svbsl1n_n_s16(...) __builtin_sve_svbsl1n_n_s16(__VA_ARGS__)
+#define svbsl1n_u8(...) __builtin_sve_svbsl1n_u8(__VA_ARGS__)
+#define svbsl1n_u32(...) __builtin_sve_svbsl1n_u32(__VA_ARGS__)
+#define svbsl1n_u64(...) __builtin_sve_svbsl1n_u64(__VA_ARGS__)
+#define svbsl1n_u16(...) __builtin_sve_svbsl1n_u16(__VA_ARGS__)
+#define svbsl1n_s8(...) __builtin_sve_svbsl1n_s8(__VA_ARGS__)
+#define svbsl1n_s32(...) __builtin_sve_svbsl1n_s32(__VA_ARGS__)
+#define svbsl1n_s64(...) __builtin_sve_svbsl1n_s64(__VA_ARGS__)
+#define svbsl1n_s16(...) __builtin_sve_svbsl1n_s16(__VA_ARGS__)
+#define svbsl2n_n_u8(...) __builtin_sve_svbsl2n_n_u8(__VA_ARGS__)
+#define svbsl2n_n_u32(...) __builtin_sve_svbsl2n_n_u32(__VA_ARGS__)
+#define svbsl2n_n_u64(...) __builtin_sve_svbsl2n_n_u64(__VA_ARGS__)
+#define svbsl2n_n_u16(...) __builtin_sve_svbsl2n_n_u16(__VA_ARGS__)
+#define svbsl2n_n_s8(...) __builtin_sve_svbsl2n_n_s8(__VA_ARGS__)
+#define svbsl2n_n_s32(...) __builtin_sve_svbsl2n_n_s32(__VA_ARGS__)
+#define svbsl2n_n_s64(...) __builtin_sve_svbsl2n_n_s64(__VA_ARGS__)
+#define svbsl2n_n_s16(...) __builtin_sve_svbsl2n_n_s16(__VA_ARGS__)
+#define svbsl2n_u8(...) __builtin_sve_svbsl2n_u8(__VA_ARGS__)
+#define svbsl2n_u32(...) __builtin_sve_svbsl2n_u32(__VA_ARGS__)
+#define svbsl2n_u64(...) __builtin_sve_svbsl2n_u64(__VA_ARGS__)
+#define svbsl2n_u16(...) __builtin_sve_svbsl2n_u16(__VA_ARGS__)
+#define svbsl2n_s8(...) __builtin_sve_svbsl2n_s8(__VA_ARGS__)
+#define svbsl2n_s32(...) __builtin_sve_svbsl2n_s32(__VA_ARGS__)
+#define svbsl2n_s64(...) __builtin_sve_svbsl2n_s64(__VA_ARGS__)
+#define svbsl2n_s16(...) __builtin_sve_svbsl2n_s16(__VA_ARGS__)
+#define svbsl_n_u8(...) __builtin_sve_svbsl_n_u8(__VA_ARGS__)
+#define svbsl_n_u32(...) __builtin_sve_svbsl_n_u32(__VA_ARGS__)
+#define svbsl_n_u64(...) __builtin_sve_svbsl_n_u64(__VA_ARGS__)
+#define svbsl_n_u16(...) __builtin_sve_svbsl_n_u16(__VA_ARGS__)
+#define svbsl_n_s8(...) __builtin_sve_svbsl_n_s8(__VA_ARGS__)
+#define svbsl_n_s32(...) __builtin_sve_svbsl_n_s32(__VA_ARGS__)
+#define svbsl_n_s64(...) __builtin_sve_svbsl_n_s64(__VA_ARGS__)
+#define svbsl_n_s16(...) __builtin_sve_svbsl_n_s16(__VA_ARGS__)
+#define svbsl_u8(...) __builtin_sve_svbsl_u8(__VA_ARGS__)
+#define svbsl_u32(...) __builtin_sve_svbsl_u32(__VA_ARGS__)
+#define svbsl_u64(...) __builtin_sve_svbsl_u64(__VA_ARGS__)
+#define svbsl_u16(...) __builtin_sve_svbsl_u16(__VA_ARGS__)
+#define svbsl_s8(...) __builtin_sve_svbsl_s8(__VA_ARGS__)
+#define svbsl_s32(...) __builtin_sve_svbsl_s32(__VA_ARGS__)
+#define svbsl_s64(...) __builtin_sve_svbsl_s64(__VA_ARGS__)
+#define svbsl_s16(...) __builtin_sve_svbsl_s16(__VA_ARGS__)
+#define svcadd_u8(...) __builtin_sve_svcadd_u8(__VA_ARGS__)
+#define svcadd_u32(...) __builtin_sve_svcadd_u32(__VA_ARGS__)
+#define svcadd_u64(...) __builtin_sve_svcadd_u64(__VA_ARGS__)
+#define svcadd_u16(...) __builtin_sve_svcadd_u16(__VA_ARGS__)
+#define svcadd_s8(...) __builtin_sve_svcadd_s8(__VA_ARGS__)
+#define svcadd_s32(...) __builtin_sve_svcadd_s32(__VA_ARGS__)
+#define svcadd_s64(...) __builtin_sve_svcadd_s64(__VA_ARGS__)
+#define svcadd_s16(...) __builtin_sve_svcadd_s16(__VA_ARGS__)
+#define svcdot_s32(...) __builtin_sve_svcdot_s32(__VA_ARGS__)
+#define svcdot_s64(...) __builtin_sve_svcdot_s64(__VA_ARGS__)
+#define svcdot_lane_s32(...) __builtin_sve_svcdot_lane_s32(__VA_ARGS__)
+#define svcdot_lane_s64(...) __builtin_sve_svcdot_lane_s64(__VA_ARGS__)
+#define svcmla_u8(...) __builtin_sve_svcmla_u8(__VA_ARGS__)
+#define svcmla_u32(...) __builtin_sve_svcmla_u32(__VA_ARGS__)
+#define svcmla_u64(...) __builtin_sve_svcmla_u64(__VA_ARGS__)
+#define svcmla_u16(...) __builtin_sve_svcmla_u16(__VA_ARGS__)
+#define svcmla_s8(...) __builtin_sve_svcmla_s8(__VA_ARGS__)
+#define svcmla_s32(...) __builtin_sve_svcmla_s32(__VA_ARGS__)
+#define svcmla_s64(...) __builtin_sve_svcmla_s64(__VA_ARGS__)
+#define svcmla_s16(...) __builtin_sve_svcmla_s16(__VA_ARGS__)
+#define svcmla_lane_u32(...) __builtin_sve_svcmla_lane_u32(__VA_ARGS__)
+#define svcmla_lane_u16(...) __builtin_sve_svcmla_lane_u16(__VA_ARGS__)
+#define svcmla_lane_s32(...) __builtin_sve_svcmla_lane_s32(__VA_ARGS__)
+#define svcmla_lane_s16(...) __builtin_sve_svcmla_lane_s16(__VA_ARGS__)
+#define svcvtlt_f32_f16_m(...) __builtin_sve_svcvtlt_f32_f16_m(__VA_ARGS__)
+#define svcvtlt_f32_f16_x(...) __builtin_sve_svcvtlt_f32_f16_x(__VA_ARGS__)
+#define svcvtlt_f64_f32_m(...) __builtin_sve_svcvtlt_f64_f32_m(__VA_ARGS__)
+#define svcvtlt_f64_f32_x(...) __builtin_sve_svcvtlt_f64_f32_x(__VA_ARGS__)
+#define svcvtnt_f16_f32_m(...) __builtin_sve_svcvtnt_f16_f32_m(__VA_ARGS__)
+#define svcvtnt_f32_f64_m(...) __builtin_sve_svcvtnt_f32_f64_m(__VA_ARGS__)
+#define svcvtx_f32_f64_m(...) __builtin_sve_svcvtx_f32_f64_m(__VA_ARGS__)
+#define svcvtx_f32_f64_x(...) __builtin_sve_svcvtx_f32_f64_x(__VA_ARGS__)
+#define svcvtx_f32_f64_z(...) __builtin_sve_svcvtx_f32_f64_z(__VA_ARGS__)
+#define svcvtxnt_f32_f64_m(...) __builtin_sve_svcvtxnt_f32_f64_m(__VA_ARGS__)
+#define sveor3_n_u8(...) __builtin_sve_sveor3_n_u8(__VA_ARGS__)
+#define sveor3_n_u32(...) __builtin_sve_sveor3_n_u32(__VA_ARGS__)
+#define sveor3_n_u64(...) __builtin_sve_sveor3_n_u64(__VA_ARGS__)
+#define sveor3_n_u16(...) __builtin_sve_sveor3_n_u16(__VA_ARGS__)
+#define sveor3_n_s8(...) __builtin_sve_sveor3_n_s8(__VA_ARGS__)
+#define sveor3_n_s32(...) __builtin_sve_sveor3_n_s32(__VA_ARGS__)
+#define sveor3_n_s64(...) __builtin_sve_sveor3_n_s64(__VA_ARGS__)
+#define sveor3_n_s16(...) __builtin_sve_sveor3_n_s16(__VA_ARGS__)
+#define sveor3_u8(...) __builtin_sve_sveor3_u8(__VA_ARGS__)
+#define sveor3_u32(...) __builtin_sve_sveor3_u32(__VA_ARGS__)
+#define sveor3_u64(...) __builtin_sve_sveor3_u64(__VA_ARGS__)
+#define sveor3_u16(...) __builtin_sve_sveor3_u16(__VA_ARGS__)
+#define sveor3_s8(...) __builtin_sve_sveor3_s8(__VA_ARGS__)
+#define sveor3_s32(...) __builtin_sve_sveor3_s32(__VA_ARGS__)
+#define sveor3_s64(...) __builtin_sve_sveor3_s64(__VA_ARGS__)
+#define sveor3_s16(...) __builtin_sve_sveor3_s16(__VA_ARGS__)
+#define sveorbt_n_u8(...) __builtin_sve_sveorbt_n_u8(__VA_ARGS__)
+#define sveorbt_n_u32(...) __builtin_sve_sveorbt_n_u32(__VA_ARGS__)
+#define sveorbt_n_u64(...) __builtin_sve_sveorbt_n_u64(__VA_ARGS__)
+#define sveorbt_n_u16(...) __builtin_sve_sveorbt_n_u16(__VA_ARGS__)
+#define sveorbt_n_s8(...) __builtin_sve_sveorbt_n_s8(__VA_ARGS__)
+#define sveorbt_n_s32(...) __builtin_sve_sveorbt_n_s32(__VA_ARGS__)
+#define sveorbt_n_s64(...) __builtin_sve_sveorbt_n_s64(__VA_ARGS__)
+#define sveorbt_n_s16(...) __builtin_sve_sveorbt_n_s16(__VA_ARGS__)
+#define sveorbt_u8(...) __builtin_sve_sveorbt_u8(__VA_ARGS__)
+#define sveorbt_u32(...) __builtin_sve_sveorbt_u32(__VA_ARGS__)
+#define sveorbt_u64(...) __builtin_sve_sveorbt_u64(__VA_ARGS__)
+#define sveorbt_u16(...) __builtin_sve_sveorbt_u16(__VA_ARGS__)
+#define sveorbt_s8(...) __builtin_sve_sveorbt_s8(__VA_ARGS__)
+#define sveorbt_s32(...) __builtin_sve_sveorbt_s32(__VA_ARGS__)
+#define sveorbt_s64(...) __builtin_sve_sveorbt_s64(__VA_ARGS__)
+#define sveorbt_s16(...) __builtin_sve_sveorbt_s16(__VA_ARGS__)
+#define sveortb_n_u8(...) __builtin_sve_sveortb_n_u8(__VA_ARGS__)
+#define sveortb_n_u32(...) __builtin_sve_sveortb_n_u32(__VA_ARGS__)
+#define sveortb_n_u64(...) __builtin_sve_sveortb_n_u64(__VA_ARGS__)
+#define sveortb_n_u16(...) __builtin_sve_sveortb_n_u16(__VA_ARGS__)
+#define sveortb_n_s8(...) __builtin_sve_sveortb_n_s8(__VA_ARGS__)
+#define sveortb_n_s32(...) __builtin_sve_sveortb_n_s32(__VA_ARGS__)
+#define sveortb_n_s64(...) __builtin_sve_sveortb_n_s64(__VA_ARGS__)
+#define sveortb_n_s16(...) __builtin_sve_sveortb_n_s16(__VA_ARGS__)
+#define sveortb_u8(...) __builtin_sve_sveortb_u8(__VA_ARGS__)
+#define sveortb_u32(...) __builtin_sve_sveortb_u32(__VA_ARGS__)
+#define sveortb_u64(...) __builtin_sve_sveortb_u64(__VA_ARGS__)
+#define sveortb_u16(...) __builtin_sve_sveortb_u16(__VA_ARGS__)
+#define sveortb_s8(...) __builtin_sve_sveortb_s8(__VA_ARGS__)
+#define sveortb_s32(...) __builtin_sve_sveortb_s32(__VA_ARGS__)
+#define sveortb_s64(...) __builtin_sve_sveortb_s64(__VA_ARGS__)
+#define sveortb_s16(...) __builtin_sve_sveortb_s16(__VA_ARGS__)
+#define svhadd_n_s8_m(...) __builtin_sve_svhadd_n_s8_m(__VA_ARGS__)
+#define svhadd_n_s32_m(...) __builtin_sve_svhadd_n_s32_m(__VA_ARGS__)
+#define svhadd_n_s64_m(...) __builtin_sve_svhadd_n_s64_m(__VA_ARGS__)
+#define svhadd_n_s16_m(...) __builtin_sve_svhadd_n_s16_m(__VA_ARGS__)
+#define svhadd_n_s8_x(...) __builtin_sve_svhadd_n_s8_x(__VA_ARGS__)
+#define svhadd_n_s32_x(...) __builtin_sve_svhadd_n_s32_x(__VA_ARGS__)
+#define svhadd_n_s64_x(...) __builtin_sve_svhadd_n_s64_x(__VA_ARGS__)
+#define svhadd_n_s16_x(...) __builtin_sve_svhadd_n_s16_x(__VA_ARGS__)
+#define svhadd_n_s8_z(...) __builtin_sve_svhadd_n_s8_z(__VA_ARGS__)
+#define svhadd_n_s32_z(...) __builtin_sve_svhadd_n_s32_z(__VA_ARGS__)
+#define svhadd_n_s64_z(...) __builtin_sve_svhadd_n_s64_z(__VA_ARGS__)
+#define svhadd_n_s16_z(...) __builtin_sve_svhadd_n_s16_z(__VA_ARGS__)
+#define svhadd_n_u8_m(...) __builtin_sve_svhadd_n_u8_m(__VA_ARGS__)
+#define svhadd_n_u32_m(...) __builtin_sve_svhadd_n_u32_m(__VA_ARGS__)
+#define svhadd_n_u64_m(...) __builtin_sve_svhadd_n_u64_m(__VA_ARGS__)
+#define svhadd_n_u16_m(...) __builtin_sve_svhadd_n_u16_m(__VA_ARGS__)
+#define svhadd_n_u8_x(...) __builtin_sve_svhadd_n_u8_x(__VA_ARGS__)
+#define svhadd_n_u32_x(...) __builtin_sve_svhadd_n_u32_x(__VA_ARGS__)
+#define svhadd_n_u64_x(...) __builtin_sve_svhadd_n_u64_x(__VA_ARGS__)
+#define svhadd_n_u16_x(...) __builtin_sve_svhadd_n_u16_x(__VA_ARGS__)
+#define svhadd_n_u8_z(...) __builtin_sve_svhadd_n_u8_z(__VA_ARGS__)
+#define svhadd_n_u32_z(...) __builtin_sve_svhadd_n_u32_z(__VA_ARGS__)
+#define svhadd_n_u64_z(...) __builtin_sve_svhadd_n_u64_z(__VA_ARGS__)
+#define svhadd_n_u16_z(...) __builtin_sve_svhadd_n_u16_z(__VA_ARGS__)
+#define svhadd_s8_m(...) __builtin_sve_svhadd_s8_m(__VA_ARGS__)
+#define svhadd_s32_m(...) __builtin_sve_svhadd_s32_m(__VA_ARGS__)
+#define svhadd_s64_m(...) __builtin_sve_svhadd_s64_m(__VA_ARGS__)
+#define svhadd_s16_m(...) __builtin_sve_svhadd_s16_m(__VA_ARGS__)
+#define svhadd_s8_x(...) __builtin_sve_svhadd_s8_x(__VA_ARGS__)
+#define svhadd_s32_x(...) __builtin_sve_svhadd_s32_x(__VA_ARGS__)
+#define svhadd_s64_x(...) __builtin_sve_svhadd_s64_x(__VA_ARGS__)
+#define svhadd_s16_x(...) __builtin_sve_svhadd_s16_x(__VA_ARGS__)
+#define svhadd_s8_z(...) __builtin_sve_svhadd_s8_z(__VA_ARGS__)
+#define svhadd_s32_z(...) __builtin_sve_svhadd_s32_z(__VA_ARGS__)
+#define svhadd_s64_z(...) __builtin_sve_svhadd_s64_z(__VA_ARGS__)
+#define svhadd_s16_z(...) __builtin_sve_svhadd_s16_z(__VA_ARGS__)
+#define svhadd_u8_m(...) __builtin_sve_svhadd_u8_m(__VA_ARGS__)
+#define svhadd_u32_m(...) __builtin_sve_svhadd_u32_m(__VA_ARGS__)
+#define svhadd_u64_m(...) __builtin_sve_svhadd_u64_m(__VA_ARGS__)
+#define svhadd_u16_m(...) __builtin_sve_svhadd_u16_m(__VA_ARGS__)
+#define svhadd_u8_x(...) __builtin_sve_svhadd_u8_x(__VA_ARGS__)
+#define svhadd_u32_x(...) __builtin_sve_svhadd_u32_x(__VA_ARGS__)
+#define svhadd_u64_x(...) __builtin_sve_svhadd_u64_x(__VA_ARGS__)
+#define svhadd_u16_x(...) __builtin_sve_svhadd_u16_x(__VA_ARGS__)
+#define svhadd_u8_z(...) __builtin_sve_svhadd_u8_z(__VA_ARGS__)
+#define svhadd_u32_z(...) __builtin_sve_svhadd_u32_z(__VA_ARGS__)
+#define svhadd_u64_z(...) __builtin_sve_svhadd_u64_z(__VA_ARGS__)
+#define svhadd_u16_z(...) __builtin_sve_svhadd_u16_z(__VA_ARGS__)
+#define svhistcnt_u32_z(...) __builtin_sve_svhistcnt_u32_z(__VA_ARGS__)
+#define svhistcnt_u64_z(...) __builtin_sve_svhistcnt_u64_z(__VA_ARGS__)
+#define svhistcnt_s32_z(...) __builtin_sve_svhistcnt_s32_z(__VA_ARGS__)
+#define svhistcnt_s64_z(...) __builtin_sve_svhistcnt_s64_z(__VA_ARGS__)
+#define svhistseg_u8(...) __builtin_sve_svhistseg_u8(__VA_ARGS__)
+#define svhistseg_s8(...) __builtin_sve_svhistseg_s8(__VA_ARGS__)
+#define svhsub_n_s8_m(...) __builtin_sve_svhsub_n_s8_m(__VA_ARGS__)
+#define svhsub_n_s32_m(...) __builtin_sve_svhsub_n_s32_m(__VA_ARGS__)
+#define svhsub_n_s64_m(...) __builtin_sve_svhsub_n_s64_m(__VA_ARGS__)
+#define svhsub_n_s16_m(...) __builtin_sve_svhsub_n_s16_m(__VA_ARGS__)
+#define svhsub_n_s8_x(...) __builtin_sve_svhsub_n_s8_x(__VA_ARGS__)
+#define svhsub_n_s32_x(...) __builtin_sve_svhsub_n_s32_x(__VA_ARGS__)
+#define svhsub_n_s64_x(...) __builtin_sve_svhsub_n_s64_x(__VA_ARGS__)
+#define svhsub_n_s16_x(...) __builtin_sve_svhsub_n_s16_x(__VA_ARGS__)
+#define svhsub_n_s8_z(...) __builtin_sve_svhsub_n_s8_z(__VA_ARGS__)
+#define svhsub_n_s32_z(...) __builtin_sve_svhsub_n_s32_z(__VA_ARGS__)
+#define svhsub_n_s64_z(...) __builtin_sve_svhsub_n_s64_z(__VA_ARGS__)
+#define svhsub_n_s16_z(...) __builtin_sve_svhsub_n_s16_z(__VA_ARGS__)
+#define svhsub_n_u8_m(...) __builtin_sve_svhsub_n_u8_m(__VA_ARGS__)
+#define svhsub_n_u32_m(...) __builtin_sve_svhsub_n_u32_m(__VA_ARGS__)
+#define svhsub_n_u64_m(...) __builtin_sve_svhsub_n_u64_m(__VA_ARGS__)
+#define svhsub_n_u16_m(...) __builtin_sve_svhsub_n_u16_m(__VA_ARGS__)
+#define svhsub_n_u8_x(...) __builtin_sve_svhsub_n_u8_x(__VA_ARGS__)
+#define svhsub_n_u32_x(...) __builtin_sve_svhsub_n_u32_x(__VA_ARGS__)
+#define svhsub_n_u64_x(...) __builtin_sve_svhsub_n_u64_x(__VA_ARGS__)
+#define svhsub_n_u16_x(...) __builtin_sve_svhsub_n_u16_x(__VA_ARGS__)
+#define svhsub_n_u8_z(...) __builtin_sve_svhsub_n_u8_z(__VA_ARGS__)
+#define svhsub_n_u32_z(...) __builtin_sve_svhsub_n_u32_z(__VA_ARGS__)
+#define svhsub_n_u64_z(...) __builtin_sve_svhsub_n_u64_z(__VA_ARGS__)
+#define svhsub_n_u16_z(...) __builtin_sve_svhsub_n_u16_z(__VA_ARGS__)
+#define svhsub_s8_m(...) __builtin_sve_svhsub_s8_m(__VA_ARGS__)
+#define svhsub_s32_m(...) __builtin_sve_svhsub_s32_m(__VA_ARGS__)
+#define svhsub_s64_m(...) __builtin_sve_svhsub_s64_m(__VA_ARGS__)
+#define svhsub_s16_m(...) __builtin_sve_svhsub_s16_m(__VA_ARGS__)
+#define svhsub_s8_x(...) __builtin_sve_svhsub_s8_x(__VA_ARGS__)
+#define svhsub_s32_x(...) __builtin_sve_svhsub_s32_x(__VA_ARGS__)
+#define svhsub_s64_x(...) __builtin_sve_svhsub_s64_x(__VA_ARGS__)
+#define svhsub_s16_x(...) __builtin_sve_svhsub_s16_x(__VA_ARGS__)
+#define svhsub_s8_z(...) __builtin_sve_svhsub_s8_z(__VA_ARGS__)
+#define svhsub_s32_z(...) __builtin_sve_svhsub_s32_z(__VA_ARGS__)
+#define svhsub_s64_z(...) __builtin_sve_svhsub_s64_z(__VA_ARGS__)
+#define svhsub_s16_z(...) __builtin_sve_svhsub_s16_z(__VA_ARGS__)
+#define svhsub_u8_m(...) __builtin_sve_svhsub_u8_m(__VA_ARGS__)
+#define svhsub_u32_m(...) __builtin_sve_svhsub_u32_m(__VA_ARGS__)
+#define svhsub_u64_m(...) __builtin_sve_svhsub_u64_m(__VA_ARGS__)
+#define svhsub_u16_m(...) __builtin_sve_svhsub_u16_m(__VA_ARGS__)
+#define svhsub_u8_x(...) __builtin_sve_svhsub_u8_x(__VA_ARGS__)
+#define svhsub_u32_x(...) __builtin_sve_svhsub_u32_x(__VA_ARGS__)
+#define svhsub_u64_x(...) __builtin_sve_svhsub_u64_x(__VA_ARGS__)
+#define svhsub_u16_x(...) __builtin_sve_svhsub_u16_x(__VA_ARGS__)
+#define svhsub_u8_z(...) __builtin_sve_svhsub_u8_z(__VA_ARGS__)
+#define svhsub_u32_z(...) __builtin_sve_svhsub_u32_z(__VA_ARGS__)
+#define svhsub_u64_z(...) __builtin_sve_svhsub_u64_z(__VA_ARGS__)
+#define svhsub_u16_z(...) __builtin_sve_svhsub_u16_z(__VA_ARGS__)
+#define svhsubr_n_s8_m(...) __builtin_sve_svhsubr_n_s8_m(__VA_ARGS__)
+#define svhsubr_n_s32_m(...) __builtin_sve_svhsubr_n_s32_m(__VA_ARGS__)
+#define svhsubr_n_s64_m(...) __builtin_sve_svhsubr_n_s64_m(__VA_ARGS__)
+#define svhsubr_n_s16_m(...) __builtin_sve_svhsubr_n_s16_m(__VA_ARGS__)
+#define svhsubr_n_s8_x(...) __builtin_sve_svhsubr_n_s8_x(__VA_ARGS__)
+#define svhsubr_n_s32_x(...) __builtin_sve_svhsubr_n_s32_x(__VA_ARGS__)
+#define svhsubr_n_s64_x(...) __builtin_sve_svhsubr_n_s64_x(__VA_ARGS__)
+#define svhsubr_n_s16_x(...) __builtin_sve_svhsubr_n_s16_x(__VA_ARGS__)
+#define svhsubr_n_s8_z(...) __builtin_sve_svhsubr_n_s8_z(__VA_ARGS__)
+#define svhsubr_n_s32_z(...) __builtin_sve_svhsubr_n_s32_z(__VA_ARGS__)
+#define svhsubr_n_s64_z(...) __builtin_sve_svhsubr_n_s64_z(__VA_ARGS__)
+#define svhsubr_n_s16_z(...) __builtin_sve_svhsubr_n_s16_z(__VA_ARGS__)
+#define svhsubr_n_u8_m(...) __builtin_sve_svhsubr_n_u8_m(__VA_ARGS__)
+#define svhsubr_n_u32_m(...) __builtin_sve_svhsubr_n_u32_m(__VA_ARGS__)
+#define svhsubr_n_u64_m(...) __builtin_sve_svhsubr_n_u64_m(__VA_ARGS__)
+#define svhsubr_n_u16_m(...) __builtin_sve_svhsubr_n_u16_m(__VA_ARGS__)
+#define svhsubr_n_u8_x(...) __builtin_sve_svhsubr_n_u8_x(__VA_ARGS__)
+#define svhsubr_n_u32_x(...) __builtin_sve_svhsubr_n_u32_x(__VA_ARGS__)
+#define svhsubr_n_u64_x(...) __builtin_sve_svhsubr_n_u64_x(__VA_ARGS__)
+#define svhsubr_n_u16_x(...) __builtin_sve_svhsubr_n_u16_x(__VA_ARGS__)
+#define svhsubr_n_u8_z(...) __builtin_sve_svhsubr_n_u8_z(__VA_ARGS__)
+#define svhsubr_n_u32_z(...) __builtin_sve_svhsubr_n_u32_z(__VA_ARGS__)
+#define svhsubr_n_u64_z(...) __builtin_sve_svhsubr_n_u64_z(__VA_ARGS__)
+#define svhsubr_n_u16_z(...) __builtin_sve_svhsubr_n_u16_z(__VA_ARGS__)
+#define svhsubr_s8_m(...) __builtin_sve_svhsubr_s8_m(__VA_ARGS__)
+#define svhsubr_s32_m(...) __builtin_sve_svhsubr_s32_m(__VA_ARGS__)
+#define svhsubr_s64_m(...) __builtin_sve_svhsubr_s64_m(__VA_ARGS__)
+#define svhsubr_s16_m(...) __builtin_sve_svhsubr_s16_m(__VA_ARGS__)
+#define svhsubr_s8_x(...) __builtin_sve_svhsubr_s8_x(__VA_ARGS__)
+#define svhsubr_s32_x(...) __builtin_sve_svhsubr_s32_x(__VA_ARGS__)
+#define svhsubr_s64_x(...) __builtin_sve_svhsubr_s64_x(__VA_ARGS__)
+#define svhsubr_s16_x(...) __builtin_sve_svhsubr_s16_x(__VA_ARGS__)
+#define svhsubr_s8_z(...) __builtin_sve_svhsubr_s8_z(__VA_ARGS__)
+#define svhsubr_s32_z(...) __builtin_sve_svhsubr_s32_z(__VA_ARGS__)
+#define svhsubr_s64_z(...) __builtin_sve_svhsubr_s64_z(__VA_ARGS__)
+#define svhsubr_s16_z(...) __builtin_sve_svhsubr_s16_z(__VA_ARGS__)
+#define svhsubr_u8_m(...) __builtin_sve_svhsubr_u8_m(__VA_ARGS__)
+#define svhsubr_u32_m(...) __builtin_sve_svhsubr_u32_m(__VA_ARGS__)
+#define svhsubr_u64_m(...) __builtin_sve_svhsubr_u64_m(__VA_ARGS__)
+#define svhsubr_u16_m(...) __builtin_sve_svhsubr_u16_m(__VA_ARGS__)
+#define svhsubr_u8_x(...) __builtin_sve_svhsubr_u8_x(__VA_ARGS__)
+#define svhsubr_u32_x(...) __builtin_sve_svhsubr_u32_x(__VA_ARGS__)
+#define svhsubr_u64_x(...) __builtin_sve_svhsubr_u64_x(__VA_ARGS__)
+#define svhsubr_u16_x(...) __builtin_sve_svhsubr_u16_x(__VA_ARGS__)
+#define svhsubr_u8_z(...) __builtin_sve_svhsubr_u8_z(__VA_ARGS__)
+#define svhsubr_u32_z(...) __builtin_sve_svhsubr_u32_z(__VA_ARGS__)
+#define svhsubr_u64_z(...) __builtin_sve_svhsubr_u64_z(__VA_ARGS__)
+#define svhsubr_u16_z(...) __builtin_sve_svhsubr_u16_z(__VA_ARGS__)
+#define svldnt1_gather_u32base_index_u32(...) __builtin_sve_svldnt1_gather_u32base_index_u32(__VA_ARGS__)
+#define svldnt1_gather_u64base_index_u64(...) __builtin_sve_svldnt1_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1_gather_u64base_index_f64(...) __builtin_sve_svldnt1_gather_u64base_index_f64(__VA_ARGS__)
+#define svldnt1_gather_u32base_index_f32(...) __builtin_sve_svldnt1_gather_u32base_index_f32(__VA_ARGS__)
+#define svldnt1_gather_u32base_index_s32(...) __builtin_sve_svldnt1_gather_u32base_index_s32(__VA_ARGS__)
+#define svldnt1_gather_u64base_index_s64(...) __builtin_sve_svldnt1_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1_gather_u32base_offset_u32(...) __builtin_sve_svldnt1_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1_gather_u64base_offset_u64(...) __builtin_sve_svldnt1_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1_gather_u64base_offset_f64(...) __builtin_sve_svldnt1_gather_u64base_offset_f64(__VA_ARGS__)
+#define svldnt1_gather_u32base_offset_f32(...) __builtin_sve_svldnt1_gather_u32base_offset_f32(__VA_ARGS__)
+#define svldnt1_gather_u32base_offset_s32(...) __builtin_sve_svldnt1_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1_gather_u64base_offset_s64(...) __builtin_sve_svldnt1_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1_gather_u32base_u32(...) __builtin_sve_svldnt1_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1_gather_u64base_u64(...) __builtin_sve_svldnt1_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1_gather_u64base_f64(...) __builtin_sve_svldnt1_gather_u64base_f64(__VA_ARGS__)
+#define svldnt1_gather_u32base_f32(...) __builtin_sve_svldnt1_gather_u32base_f32(__VA_ARGS__)
+#define svldnt1_gather_u32base_s32(...) __builtin_sve_svldnt1_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1_gather_u64base_s64(...) __builtin_sve_svldnt1_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1_gather_s64index_u64(...) __builtin_sve_svldnt1_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1_gather_s64index_f64(...) __builtin_sve_svldnt1_gather_s64index_f64(__VA_ARGS__)
+#define svldnt1_gather_s64index_s64(...) __builtin_sve_svldnt1_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1_gather_u64index_u64(...) __builtin_sve_svldnt1_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1_gather_u64index_f64(...) __builtin_sve_svldnt1_gather_u64index_f64(__VA_ARGS__)
+#define svldnt1_gather_u64index_s64(...) __builtin_sve_svldnt1_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1_gather_u32offset_u32(...) __builtin_sve_svldnt1_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1_gather_u32offset_f32(...) __builtin_sve_svldnt1_gather_u32offset_f32(__VA_ARGS__)
+#define svldnt1_gather_u32offset_s32(...) __builtin_sve_svldnt1_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1_gather_s64offset_u64(...) __builtin_sve_svldnt1_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1_gather_s64offset_f64(...) __builtin_sve_svldnt1_gather_s64offset_f64(__VA_ARGS__)
+#define svldnt1_gather_s64offset_s64(...) __builtin_sve_svldnt1_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1_gather_u64offset_u64(...) __builtin_sve_svldnt1_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1_gather_u64offset_f64(...) __builtin_sve_svldnt1_gather_u64offset_f64(__VA_ARGS__)
+#define svldnt1_gather_u64offset_s64(...) __builtin_sve_svldnt1_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_offset_u32(...) __builtin_sve_svldnt1sb_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_offset_u64(...) __builtin_sve_svldnt1sb_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_offset_s32(...) __builtin_sve_svldnt1sb_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_offset_s64(...) __builtin_sve_svldnt1sb_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_u32(...) __builtin_sve_svldnt1sb_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_u64(...) __builtin_sve_svldnt1sb_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_s32(...) __builtin_sve_svldnt1sb_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_s64(...) __builtin_sve_svldnt1sb_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u32offset_u32(...) __builtin_sve_svldnt1sb_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1sb_gather_u32offset_s32(...) __builtin_sve_svldnt1sb_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1sb_gather_s64offset_u64(...) __builtin_sve_svldnt1sb_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1sb_gather_s64offset_s64(...) __builtin_sve_svldnt1sb_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u64offset_u64(...) __builtin_sve_svldnt1sb_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1sb_gather_u64offset_s64(...) __builtin_sve_svldnt1sb_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_index_u32(...) __builtin_sve_svldnt1sh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_index_u64(...) __builtin_sve_svldnt1sh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_index_s32(...) __builtin_sve_svldnt1sh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_index_s64(...) __builtin_sve_svldnt1sh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_offset_u32(...) __builtin_sve_svldnt1sh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_offset_u64(...) __builtin_sve_svldnt1sh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_offset_s32(...) __builtin_sve_svldnt1sh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_offset_s64(...) __builtin_sve_svldnt1sh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_u32(...) __builtin_sve_svldnt1sh_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_u64(...) __builtin_sve_svldnt1sh_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_s32(...) __builtin_sve_svldnt1sh_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_s64(...) __builtin_sve_svldnt1sh_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1sh_gather_s64index_u64(...) __builtin_sve_svldnt1sh_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1sh_gather_s64index_s64(...) __builtin_sve_svldnt1sh_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u64index_u64(...) __builtin_sve_svldnt1sh_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u64index_s64(...) __builtin_sve_svldnt1sh_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32offset_u32(...) __builtin_sve_svldnt1sh_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u32offset_s32(...) __builtin_sve_svldnt1sh_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1sh_gather_s64offset_u64(...) __builtin_sve_svldnt1sh_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1sh_gather_s64offset_s64(...) __builtin_sve_svldnt1sh_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u64offset_u64(...) __builtin_sve_svldnt1sh_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u64offset_s64(...) __builtin_sve_svldnt1sh_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_index_u64(...) __builtin_sve_svldnt1sw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_index_s64(...) __builtin_sve_svldnt1sw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_offset_u64(...) __builtin_sve_svldnt1sw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_offset_s64(...) __builtin_sve_svldnt1sw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_u64(...) __builtin_sve_svldnt1sw_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_s64(...) __builtin_sve_svldnt1sw_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1sw_gather_s64index_u64(...) __builtin_sve_svldnt1sw_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1sw_gather_s64index_s64(...) __builtin_sve_svldnt1sw_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64index_u64(...) __builtin_sve_svldnt1sw_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64index_s64(...) __builtin_sve_svldnt1sw_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1sw_gather_s64offset_u64(...) __builtin_sve_svldnt1sw_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1sw_gather_s64offset_s64(...) __builtin_sve_svldnt1sw_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64offset_u64(...) __builtin_sve_svldnt1sw_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64offset_s64(...) __builtin_sve_svldnt1sw_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_offset_u32(...) __builtin_sve_svldnt1ub_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_offset_u64(...) __builtin_sve_svldnt1ub_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_offset_s32(...) __builtin_sve_svldnt1ub_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_offset_s64(...) __builtin_sve_svldnt1ub_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_u32(...) __builtin_sve_svldnt1ub_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_u64(...) __builtin_sve_svldnt1ub_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_s32(...) __builtin_sve_svldnt1ub_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_s64(...) __builtin_sve_svldnt1ub_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u32offset_u32(...) __builtin_sve_svldnt1ub_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1ub_gather_u32offset_s32(...) __builtin_sve_svldnt1ub_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1ub_gather_s64offset_u64(...) __builtin_sve_svldnt1ub_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1ub_gather_s64offset_s64(...) __builtin_sve_svldnt1ub_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u64offset_u64(...) __builtin_sve_svldnt1ub_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1ub_gather_u64offset_s64(...) __builtin_sve_svldnt1ub_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_index_u32(...) __builtin_sve_svldnt1uh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_index_u64(...) __builtin_sve_svldnt1uh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_index_s32(...) __builtin_sve_svldnt1uh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_index_s64(...) __builtin_sve_svldnt1uh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_offset_u32(...) __builtin_sve_svldnt1uh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_offset_u64(...) __builtin_sve_svldnt1uh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_offset_s32(...) __builtin_sve_svldnt1uh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_offset_s64(...) __builtin_sve_svldnt1uh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_u32(...) __builtin_sve_svldnt1uh_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_u64(...) __builtin_sve_svldnt1uh_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_s32(...) __builtin_sve_svldnt1uh_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_s64(...) __builtin_sve_svldnt1uh_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1uh_gather_s64index_u64(...) __builtin_sve_svldnt1uh_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1uh_gather_s64index_s64(...) __builtin_sve_svldnt1uh_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u64index_u64(...) __builtin_sve_svldnt1uh_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u64index_s64(...) __builtin_sve_svldnt1uh_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32offset_u32(...) __builtin_sve_svldnt1uh_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u32offset_s32(...) __builtin_sve_svldnt1uh_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1uh_gather_s64offset_u64(...) __builtin_sve_svldnt1uh_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1uh_gather_s64offset_s64(...) __builtin_sve_svldnt1uh_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u64offset_u64(...) __builtin_sve_svldnt1uh_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u64offset_s64(...) __builtin_sve_svldnt1uh_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_index_u64(...) __builtin_sve_svldnt1uw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_index_s64(...) __builtin_sve_svldnt1uw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_offset_u64(...) __builtin_sve_svldnt1uw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_offset_s64(...) __builtin_sve_svldnt1uw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_u64(...) __builtin_sve_svldnt1uw_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_s64(...) __builtin_sve_svldnt1uw_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1uw_gather_s64index_u64(...) __builtin_sve_svldnt1uw_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1uw_gather_s64index_s64(...) __builtin_sve_svldnt1uw_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64index_u64(...) __builtin_sve_svldnt1uw_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64index_s64(...) __builtin_sve_svldnt1uw_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1uw_gather_s64offset_u64(...) __builtin_sve_svldnt1uw_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1uw_gather_s64offset_s64(...) __builtin_sve_svldnt1uw_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64offset_u64(...) __builtin_sve_svldnt1uw_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64offset_s64(...) __builtin_sve_svldnt1uw_gather_u64offset_s64(__VA_ARGS__)
+#define svlogb_f64_m(...) __builtin_sve_svlogb_f64_m(__VA_ARGS__)
+#define svlogb_f32_m(...) __builtin_sve_svlogb_f32_m(__VA_ARGS__)
+#define svlogb_f16_m(...) __builtin_sve_svlogb_f16_m(__VA_ARGS__)
+#define svlogb_f64_x(...) __builtin_sve_svlogb_f64_x(__VA_ARGS__)
+#define svlogb_f32_x(...) __builtin_sve_svlogb_f32_x(__VA_ARGS__)
+#define svlogb_f16_x(...) __builtin_sve_svlogb_f16_x(__VA_ARGS__)
+#define svlogb_f64_z(...) __builtin_sve_svlogb_f64_z(__VA_ARGS__)
+#define svlogb_f32_z(...) __builtin_sve_svlogb_f32_z(__VA_ARGS__)
+#define svlogb_f16_z(...) __builtin_sve_svlogb_f16_z(__VA_ARGS__)
+#define svmatch_u8(...) __builtin_sve_svmatch_u8(__VA_ARGS__)
+#define svmatch_u16(...) __builtin_sve_svmatch_u16(__VA_ARGS__)
+#define svmatch_s8(...) __builtin_sve_svmatch_s8(__VA_ARGS__)
+#define svmatch_s16(...) __builtin_sve_svmatch_s16(__VA_ARGS__)
+#define svmaxnmp_f64_m(...) __builtin_sve_svmaxnmp_f64_m(__VA_ARGS__)
+#define svmaxnmp_f32_m(...) __builtin_sve_svmaxnmp_f32_m(__VA_ARGS__)
+#define svmaxnmp_f16_m(...) __builtin_sve_svmaxnmp_f16_m(__VA_ARGS__)
+#define svmaxnmp_f64_x(...) __builtin_sve_svmaxnmp_f64_x(__VA_ARGS__)
+#define svmaxnmp_f32_x(...) __builtin_sve_svmaxnmp_f32_x(__VA_ARGS__)
+#define svmaxnmp_f16_x(...) __builtin_sve_svmaxnmp_f16_x(__VA_ARGS__)
+#define svmaxp_f64_m(...) __builtin_sve_svmaxp_f64_m(__VA_ARGS__)
+#define svmaxp_f32_m(...) __builtin_sve_svmaxp_f32_m(__VA_ARGS__)
+#define svmaxp_f16_m(...) __builtin_sve_svmaxp_f16_m(__VA_ARGS__)
+#define svmaxp_f64_x(...) __builtin_sve_svmaxp_f64_x(__VA_ARGS__)
+#define svmaxp_f32_x(...) __builtin_sve_svmaxp_f32_x(__VA_ARGS__)
+#define svmaxp_f16_x(...) __builtin_sve_svmaxp_f16_x(__VA_ARGS__)
+#define svmaxp_s8_m(...) __builtin_sve_svmaxp_s8_m(__VA_ARGS__)
+#define svmaxp_s32_m(...) __builtin_sve_svmaxp_s32_m(__VA_ARGS__)
+#define svmaxp_s64_m(...) __builtin_sve_svmaxp_s64_m(__VA_ARGS__)
+#define svmaxp_s16_m(...) __builtin_sve_svmaxp_s16_m(__VA_ARGS__)
+#define svmaxp_s8_x(...) __builtin_sve_svmaxp_s8_x(__VA_ARGS__)
+#define svmaxp_s32_x(...) __builtin_sve_svmaxp_s32_x(__VA_ARGS__)
+#define svmaxp_s64_x(...) __builtin_sve_svmaxp_s64_x(__VA_ARGS__)
+#define svmaxp_s16_x(...) __builtin_sve_svmaxp_s16_x(__VA_ARGS__)
+#define svmaxp_u8_m(...) __builtin_sve_svmaxp_u8_m(__VA_ARGS__)
+#define svmaxp_u32_m(...) __builtin_sve_svmaxp_u32_m(__VA_ARGS__)
+#define svmaxp_u64_m(...) __builtin_sve_svmaxp_u64_m(__VA_ARGS__)
+#define svmaxp_u16_m(...) __builtin_sve_svmaxp_u16_m(__VA_ARGS__)
+#define svmaxp_u8_x(...) __builtin_sve_svmaxp_u8_x(__VA_ARGS__)
+#define svmaxp_u32_x(...) __builtin_sve_svmaxp_u32_x(__VA_ARGS__)
+#define svmaxp_u64_x(...) __builtin_sve_svmaxp_u64_x(__VA_ARGS__)
+#define svmaxp_u16_x(...) __builtin_sve_svmaxp_u16_x(__VA_ARGS__)
+#define svminnmp_f64_m(...) __builtin_sve_svminnmp_f64_m(__VA_ARGS__)
+#define svminnmp_f32_m(...) __builtin_sve_svminnmp_f32_m(__VA_ARGS__)
+#define svminnmp_f16_m(...) __builtin_sve_svminnmp_f16_m(__VA_ARGS__)
+#define svminnmp_f64_x(...) __builtin_sve_svminnmp_f64_x(__VA_ARGS__)
+#define svminnmp_f32_x(...) __builtin_sve_svminnmp_f32_x(__VA_ARGS__)
+#define svminnmp_f16_x(...) __builtin_sve_svminnmp_f16_x(__VA_ARGS__)
+#define svminp_f64_m(...) __builtin_sve_svminp_f64_m(__VA_ARGS__)
+#define svminp_f32_m(...) __builtin_sve_svminp_f32_m(__VA_ARGS__)
+#define svminp_f16_m(...) __builtin_sve_svminp_f16_m(__VA_ARGS__)
+#define svminp_f64_x(...) __builtin_sve_svminp_f64_x(__VA_ARGS__)
+#define svminp_f32_x(...) __builtin_sve_svminp_f32_x(__VA_ARGS__)
+#define svminp_f16_x(...) __builtin_sve_svminp_f16_x(__VA_ARGS__)
+#define svminp_s8_m(...) __builtin_sve_svminp_s8_m(__VA_ARGS__)
+#define svminp_s32_m(...) __builtin_sve_svminp_s32_m(__VA_ARGS__)
+#define svminp_s64_m(...) __builtin_sve_svminp_s64_m(__VA_ARGS__)
+#define svminp_s16_m(...) __builtin_sve_svminp_s16_m(__VA_ARGS__)
+#define svminp_s8_x(...) __builtin_sve_svminp_s8_x(__VA_ARGS__)
+#define svminp_s32_x(...) __builtin_sve_svminp_s32_x(__VA_ARGS__)
+#define svminp_s64_x(...) __builtin_sve_svminp_s64_x(__VA_ARGS__)
+#define svminp_s16_x(...) __builtin_sve_svminp_s16_x(__VA_ARGS__)
+#define svminp_u8_m(...) __builtin_sve_svminp_u8_m(__VA_ARGS__)
+#define svminp_u32_m(...) __builtin_sve_svminp_u32_m(__VA_ARGS__)
+#define svminp_u64_m(...) __builtin_sve_svminp_u64_m(__VA_ARGS__)
+#define svminp_u16_m(...) __builtin_sve_svminp_u16_m(__VA_ARGS__)
+#define svminp_u8_x(...) __builtin_sve_svminp_u8_x(__VA_ARGS__)
+#define svminp_u32_x(...) __builtin_sve_svminp_u32_x(__VA_ARGS__)
+#define svminp_u64_x(...) __builtin_sve_svminp_u64_x(__VA_ARGS__)
+#define svminp_u16_x(...) __builtin_sve_svminp_u16_x(__VA_ARGS__)
+#define svmla_lane_u32(...) __builtin_sve_svmla_lane_u32(__VA_ARGS__)
+#define svmla_lane_u64(...) __builtin_sve_svmla_lane_u64(__VA_ARGS__)
+#define svmla_lane_u16(...) __builtin_sve_svmla_lane_u16(__VA_ARGS__)
+#define svmla_lane_s32(...) __builtin_sve_svmla_lane_s32(__VA_ARGS__)
+#define svmla_lane_s64(...) __builtin_sve_svmla_lane_s64(__VA_ARGS__)
+#define svmla_lane_s16(...) __builtin_sve_svmla_lane_s16(__VA_ARGS__)
+#define svmlalb_n_f32(...) __builtin_sve_svmlalb_n_f32(__VA_ARGS__)
+#define svmlalb_n_s32(...) __builtin_sve_svmlalb_n_s32(__VA_ARGS__)
+#define svmlalb_n_s64(...) __builtin_sve_svmlalb_n_s64(__VA_ARGS__)
+#define svmlalb_n_s16(...) __builtin_sve_svmlalb_n_s16(__VA_ARGS__)
+#define svmlalb_n_u32(...) __builtin_sve_svmlalb_n_u32(__VA_ARGS__)
+#define svmlalb_n_u64(...) __builtin_sve_svmlalb_n_u64(__VA_ARGS__)
+#define svmlalb_n_u16(...) __builtin_sve_svmlalb_n_u16(__VA_ARGS__)
+#define svmlalb_f32(...) __builtin_sve_svmlalb_f32(__VA_ARGS__)
+#define svmlalb_s32(...) __builtin_sve_svmlalb_s32(__VA_ARGS__)
+#define svmlalb_s64(...) __builtin_sve_svmlalb_s64(__VA_ARGS__)
+#define svmlalb_s16(...) __builtin_sve_svmlalb_s16(__VA_ARGS__)
+#define svmlalb_u32(...) __builtin_sve_svmlalb_u32(__VA_ARGS__)
+#define svmlalb_u64(...) __builtin_sve_svmlalb_u64(__VA_ARGS__)
+#define svmlalb_u16(...) __builtin_sve_svmlalb_u16(__VA_ARGS__)
+#define svmlalb_lane_f32(...) __builtin_sve_svmlalb_lane_f32(__VA_ARGS__)
+#define svmlalb_lane_s32(...) __builtin_sve_svmlalb_lane_s32(__VA_ARGS__)
+#define svmlalb_lane_s64(...) __builtin_sve_svmlalb_lane_s64(__VA_ARGS__)
+#define svmlalb_lane_u32(...) __builtin_sve_svmlalb_lane_u32(__VA_ARGS__)
+#define svmlalb_lane_u64(...) __builtin_sve_svmlalb_lane_u64(__VA_ARGS__)
+#define svmlalt_n_f32(...) __builtin_sve_svmlalt_n_f32(__VA_ARGS__)
+#define svmlalt_n_s32(...) __builtin_sve_svmlalt_n_s32(__VA_ARGS__)
+#define svmlalt_n_s64(...) __builtin_sve_svmlalt_n_s64(__VA_ARGS__)
+#define svmlalt_n_s16(...) __builtin_sve_svmlalt_n_s16(__VA_ARGS__)
+#define svmlalt_n_u32(...) __builtin_sve_svmlalt_n_u32(__VA_ARGS__)
+#define svmlalt_n_u64(...) __builtin_sve_svmlalt_n_u64(__VA_ARGS__)
+#define svmlalt_n_u16(...) __builtin_sve_svmlalt_n_u16(__VA_ARGS__)
+#define svmlalt_f32(...) __builtin_sve_svmlalt_f32(__VA_ARGS__)
+#define svmlalt_s32(...) __builtin_sve_svmlalt_s32(__VA_ARGS__)
+#define svmlalt_s64(...) __builtin_sve_svmlalt_s64(__VA_ARGS__)
+#define svmlalt_s16(...) __builtin_sve_svmlalt_s16(__VA_ARGS__)
+#define svmlalt_u32(...) __builtin_sve_svmlalt_u32(__VA_ARGS__)
+#define svmlalt_u64(...) __builtin_sve_svmlalt_u64(__VA_ARGS__)
+#define svmlalt_u16(...) __builtin_sve_svmlalt_u16(__VA_ARGS__)
+#define svmlalt_lane_f32(...) __builtin_sve_svmlalt_lane_f32(__VA_ARGS__)
+#define svmlalt_lane_s32(...) __builtin_sve_svmlalt_lane_s32(__VA_ARGS__)
+#define svmlalt_lane_s64(...) __builtin_sve_svmlalt_lane_s64(__VA_ARGS__)
+#define svmlalt_lane_u32(...) __builtin_sve_svmlalt_lane_u32(__VA_ARGS__)
+#define svmlalt_lane_u64(...) __builtin_sve_svmlalt_lane_u64(__VA_ARGS__)
+#define svmls_lane_u32(...) __builtin_sve_svmls_lane_u32(__VA_ARGS__)
+#define svmls_lane_u64(...) __builtin_sve_svmls_lane_u64(__VA_ARGS__)
+#define svmls_lane_u16(...) __builtin_sve_svmls_lane_u16(__VA_ARGS__)
+#define svmls_lane_s32(...) __builtin_sve_svmls_lane_s32(__VA_ARGS__)
+#define svmls_lane_s64(...) __builtin_sve_svmls_lane_s64(__VA_ARGS__)
+#define svmls_lane_s16(...) __builtin_sve_svmls_lane_s16(__VA_ARGS__)
+#define svmlslb_n_f32(...) __builtin_sve_svmlslb_n_f32(__VA_ARGS__)
+#define svmlslb_n_s32(...) __builtin_sve_svmlslb_n_s32(__VA_ARGS__)
+#define svmlslb_n_s64(...) __builtin_sve_svmlslb_n_s64(__VA_ARGS__)
+#define svmlslb_n_s16(...) __builtin_sve_svmlslb_n_s16(__VA_ARGS__)
+#define svmlslb_n_u32(...) __builtin_sve_svmlslb_n_u32(__VA_ARGS__)
+#define svmlslb_n_u64(...) __builtin_sve_svmlslb_n_u64(__VA_ARGS__)
+#define svmlslb_n_u16(...) __builtin_sve_svmlslb_n_u16(__VA_ARGS__)
+#define svmlslb_f32(...) __builtin_sve_svmlslb_f32(__VA_ARGS__)
+#define svmlslb_s32(...) __builtin_sve_svmlslb_s32(__VA_ARGS__)
+#define svmlslb_s64(...) __builtin_sve_svmlslb_s64(__VA_ARGS__)
+#define svmlslb_s16(...) __builtin_sve_svmlslb_s16(__VA_ARGS__)
+#define svmlslb_u32(...) __builtin_sve_svmlslb_u32(__VA_ARGS__)
+#define svmlslb_u64(...) __builtin_sve_svmlslb_u64(__VA_ARGS__)
+#define svmlslb_u16(...) __builtin_sve_svmlslb_u16(__VA_ARGS__)
+#define svmlslb_lane_f32(...) __builtin_sve_svmlslb_lane_f32(__VA_ARGS__)
+#define svmlslb_lane_s32(...) __builtin_sve_svmlslb_lane_s32(__VA_ARGS__)
+#define svmlslb_lane_s64(...) __builtin_sve_svmlslb_lane_s64(__VA_ARGS__)
+#define svmlslb_lane_u32(...) __builtin_sve_svmlslb_lane_u32(__VA_ARGS__)
+#define svmlslb_lane_u64(...) __builtin_sve_svmlslb_lane_u64(__VA_ARGS__)
+#define svmlslt_n_f32(...) __builtin_sve_svmlslt_n_f32(__VA_ARGS__)
+#define svmlslt_n_s32(...) __builtin_sve_svmlslt_n_s32(__VA_ARGS__)
+#define svmlslt_n_s64(...) __builtin_sve_svmlslt_n_s64(__VA_ARGS__)
+#define svmlslt_n_s16(...) __builtin_sve_svmlslt_n_s16(__VA_ARGS__)
+#define svmlslt_n_u32(...) __builtin_sve_svmlslt_n_u32(__VA_ARGS__)
+#define svmlslt_n_u64(...) __builtin_sve_svmlslt_n_u64(__VA_ARGS__)
+#define svmlslt_n_u16(...) __builtin_sve_svmlslt_n_u16(__VA_ARGS__)
+#define svmlslt_f32(...) __builtin_sve_svmlslt_f32(__VA_ARGS__)
+#define svmlslt_s32(...) __builtin_sve_svmlslt_s32(__VA_ARGS__)
+#define svmlslt_s64(...) __builtin_sve_svmlslt_s64(__VA_ARGS__)
+#define svmlslt_s16(...) __builtin_sve_svmlslt_s16(__VA_ARGS__)
+#define svmlslt_u32(...) __builtin_sve_svmlslt_u32(__VA_ARGS__)
+#define svmlslt_u64(...) __builtin_sve_svmlslt_u64(__VA_ARGS__)
+#define svmlslt_u16(...) __builtin_sve_svmlslt_u16(__VA_ARGS__)
+#define svmlslt_lane_f32(...) __builtin_sve_svmlslt_lane_f32(__VA_ARGS__)
+#define svmlslt_lane_s32(...) __builtin_sve_svmlslt_lane_s32(__VA_ARGS__)
+#define svmlslt_lane_s64(...) __builtin_sve_svmlslt_lane_s64(__VA_ARGS__)
+#define svmlslt_lane_u32(...) __builtin_sve_svmlslt_lane_u32(__VA_ARGS__)
+#define svmlslt_lane_u64(...) __builtin_sve_svmlslt_lane_u64(__VA_ARGS__)
+#define svmovlb_s32(...) __builtin_sve_svmovlb_s32(__VA_ARGS__)
+#define svmovlb_s64(...) __builtin_sve_svmovlb_s64(__VA_ARGS__)
+#define svmovlb_s16(...) __builtin_sve_svmovlb_s16(__VA_ARGS__)
+#define svmovlb_u32(...) __builtin_sve_svmovlb_u32(__VA_ARGS__)
+#define svmovlb_u64(...) __builtin_sve_svmovlb_u64(__VA_ARGS__)
+#define svmovlb_u16(...) __builtin_sve_svmovlb_u16(__VA_ARGS__)
+#define svmovlt_s32(...) __builtin_sve_svmovlt_s32(__VA_ARGS__)
+#define svmovlt_s64(...) __builtin_sve_svmovlt_s64(__VA_ARGS__)
+#define svmovlt_s16(...) __builtin_sve_svmovlt_s16(__VA_ARGS__)
+#define svmovlt_u32(...) __builtin_sve_svmovlt_u32(__VA_ARGS__)
+#define svmovlt_u64(...) __builtin_sve_svmovlt_u64(__VA_ARGS__)
+#define svmovlt_u16(...) __builtin_sve_svmovlt_u16(__VA_ARGS__)
+#define svmul_lane_u32(...) __builtin_sve_svmul_lane_u32(__VA_ARGS__)
+#define svmul_lane_u64(...) __builtin_sve_svmul_lane_u64(__VA_ARGS__)
+#define svmul_lane_u16(...) __builtin_sve_svmul_lane_u16(__VA_ARGS__)
+#define svmul_lane_s32(...) __builtin_sve_svmul_lane_s32(__VA_ARGS__)
+#define svmul_lane_s64(...) __builtin_sve_svmul_lane_s64(__VA_ARGS__)
+#define svmul_lane_s16(...) __builtin_sve_svmul_lane_s16(__VA_ARGS__)
+#define svmullb_n_s32(...) __builtin_sve_svmullb_n_s32(__VA_ARGS__)
+#define svmullb_n_s64(...) __builtin_sve_svmullb_n_s64(__VA_ARGS__)
+#define svmullb_n_s16(...) __builtin_sve_svmullb_n_s16(__VA_ARGS__)
+#define svmullb_n_u32(...) __builtin_sve_svmullb_n_u32(__VA_ARGS__)
+#define svmullb_n_u64(...) __builtin_sve_svmullb_n_u64(__VA_ARGS__)
+#define svmullb_n_u16(...) __builtin_sve_svmullb_n_u16(__VA_ARGS__)
+#define svmullb_s32(...) __builtin_sve_svmullb_s32(__VA_ARGS__)
+#define svmullb_s64(...) __builtin_sve_svmullb_s64(__VA_ARGS__)
+#define svmullb_s16(...) __builtin_sve_svmullb_s16(__VA_ARGS__)
+#define svmullb_u32(...) __builtin_sve_svmullb_u32(__VA_ARGS__)
+#define svmullb_u64(...) __builtin_sve_svmullb_u64(__VA_ARGS__)
+#define svmullb_u16(...) __builtin_sve_svmullb_u16(__VA_ARGS__)
+#define svmullb_lane_s32(...) __builtin_sve_svmullb_lane_s32(__VA_ARGS__)
+#define svmullb_lane_s64(...) __builtin_sve_svmullb_lane_s64(__VA_ARGS__)
+#define svmullb_lane_u32(...) __builtin_sve_svmullb_lane_u32(__VA_ARGS__)
+#define svmullb_lane_u64(...) __builtin_sve_svmullb_lane_u64(__VA_ARGS__)
+#define svmullt_n_s32(...) __builtin_sve_svmullt_n_s32(__VA_ARGS__)
+#define svmullt_n_s64(...) __builtin_sve_svmullt_n_s64(__VA_ARGS__)
+#define svmullt_n_s16(...) __builtin_sve_svmullt_n_s16(__VA_ARGS__)
+#define svmullt_n_u32(...) __builtin_sve_svmullt_n_u32(__VA_ARGS__)
+#define svmullt_n_u64(...) __builtin_sve_svmullt_n_u64(__VA_ARGS__)
+#define svmullt_n_u16(...) __builtin_sve_svmullt_n_u16(__VA_ARGS__)
+#define svmullt_s32(...) __builtin_sve_svmullt_s32(__VA_ARGS__)
+#define svmullt_s64(...) __builtin_sve_svmullt_s64(__VA_ARGS__)
+#define svmullt_s16(...) __builtin_sve_svmullt_s16(__VA_ARGS__)
+#define svmullt_u32(...) __builtin_sve_svmullt_u32(__VA_ARGS__)
+#define svmullt_u64(...) __builtin_sve_svmullt_u64(__VA_ARGS__)
+#define svmullt_u16(...) __builtin_sve_svmullt_u16(__VA_ARGS__)
+#define svmullt_lane_s32(...) __builtin_sve_svmullt_lane_s32(__VA_ARGS__)
+#define svmullt_lane_s64(...) __builtin_sve_svmullt_lane_s64(__VA_ARGS__)
+#define svmullt_lane_u32(...) __builtin_sve_svmullt_lane_u32(__VA_ARGS__)
+#define svmullt_lane_u64(...) __builtin_sve_svmullt_lane_u64(__VA_ARGS__)
+#define svnbsl_n_u8(...) __builtin_sve_svnbsl_n_u8(__VA_ARGS__)
+#define svnbsl_n_u32(...) __builtin_sve_svnbsl_n_u32(__VA_ARGS__)
+#define svnbsl_n_u64(...) __builtin_sve_svnbsl_n_u64(__VA_ARGS__)
+#define svnbsl_n_u16(...) __builtin_sve_svnbsl_n_u16(__VA_ARGS__)
+#define svnbsl_n_s8(...) __builtin_sve_svnbsl_n_s8(__VA_ARGS__)
+#define svnbsl_n_s32(...) __builtin_sve_svnbsl_n_s32(__VA_ARGS__)
+#define svnbsl_n_s64(...) __builtin_sve_svnbsl_n_s64(__VA_ARGS__)
+#define svnbsl_n_s16(...) __builtin_sve_svnbsl_n_s16(__VA_ARGS__)
+#define svnbsl_u8(...) __builtin_sve_svnbsl_u8(__VA_ARGS__)
+#define svnbsl_u32(...) __builtin_sve_svnbsl_u32(__VA_ARGS__)
+#define svnbsl_u64(...) __builtin_sve_svnbsl_u64(__VA_ARGS__)
+#define svnbsl_u16(...) __builtin_sve_svnbsl_u16(__VA_ARGS__)
+#define svnbsl_s8(...) __builtin_sve_svnbsl_s8(__VA_ARGS__)
+#define svnbsl_s32(...) __builtin_sve_svnbsl_s32(__VA_ARGS__)
+#define svnbsl_s64(...) __builtin_sve_svnbsl_s64(__VA_ARGS__)
+#define svnbsl_s16(...) __builtin_sve_svnbsl_s16(__VA_ARGS__)
+#define svnmatch_u8(...) __builtin_sve_svnmatch_u8(__VA_ARGS__)
+#define svnmatch_u16(...) __builtin_sve_svnmatch_u16(__VA_ARGS__)
+#define svnmatch_s8(...) __builtin_sve_svnmatch_s8(__VA_ARGS__)
+#define svnmatch_s16(...) __builtin_sve_svnmatch_s16(__VA_ARGS__)
+#define svpmul_n_u8(...) __builtin_sve_svpmul_n_u8(__VA_ARGS__)
+#define svpmul_u8(...) __builtin_sve_svpmul_u8(__VA_ARGS__)
+#define svpmullb_n_u64(...) __builtin_sve_svpmullb_n_u64(__VA_ARGS__)
+#define svpmullb_n_u16(...) __builtin_sve_svpmullb_n_u16(__VA_ARGS__)
+#define svpmullb_u64(...) __builtin_sve_svpmullb_u64(__VA_ARGS__)
+#define svpmullb_u16(...) __builtin_sve_svpmullb_u16(__VA_ARGS__)
+#define svpmullb_pair_n_u8(...) __builtin_sve_svpmullb_pair_n_u8(__VA_ARGS__)
+#define svpmullb_pair_n_u32(...) __builtin_sve_svpmullb_pair_n_u32(__VA_ARGS__)
+#define svpmullb_pair_u8(...) __builtin_sve_svpmullb_pair_u8(__VA_ARGS__)
+#define svpmullb_pair_u32(...) __builtin_sve_svpmullb_pair_u32(__VA_ARGS__)
+#define svpmullt_n_u64(...) __builtin_sve_svpmullt_n_u64(__VA_ARGS__)
+#define svpmullt_n_u16(...) __builtin_sve_svpmullt_n_u16(__VA_ARGS__)
+#define svpmullt_u64(...) __builtin_sve_svpmullt_u64(__VA_ARGS__)
+#define svpmullt_u16(...) __builtin_sve_svpmullt_u16(__VA_ARGS__)
+#define svpmullt_pair_n_u8(...) __builtin_sve_svpmullt_pair_n_u8(__VA_ARGS__)
+#define svpmullt_pair_n_u32(...) __builtin_sve_svpmullt_pair_n_u32(__VA_ARGS__)
+#define svpmullt_pair_u8(...) __builtin_sve_svpmullt_pair_u8(__VA_ARGS__)
+#define svpmullt_pair_u32(...) __builtin_sve_svpmullt_pair_u32(__VA_ARGS__)
+#define svqabs_s8_m(...) __builtin_sve_svqabs_s8_m(__VA_ARGS__)
+#define svqabs_s32_m(...) __builtin_sve_svqabs_s32_m(__VA_ARGS__)
+#define svqabs_s64_m(...) __builtin_sve_svqabs_s64_m(__VA_ARGS__)
+#define svqabs_s16_m(...) __builtin_sve_svqabs_s16_m(__VA_ARGS__)
+#define svqabs_s8_x(...) __builtin_sve_svqabs_s8_x(__VA_ARGS__)
+#define svqabs_s32_x(...) __builtin_sve_svqabs_s32_x(__VA_ARGS__)
+#define svqabs_s64_x(...) __builtin_sve_svqabs_s64_x(__VA_ARGS__)
+#define svqabs_s16_x(...) __builtin_sve_svqabs_s16_x(__VA_ARGS__)
+#define svqabs_s8_z(...) __builtin_sve_svqabs_s8_z(__VA_ARGS__)
+#define svqabs_s32_z(...) __builtin_sve_svqabs_s32_z(__VA_ARGS__)
+#define svqabs_s64_z(...) __builtin_sve_svqabs_s64_z(__VA_ARGS__)
+#define svqabs_s16_z(...) __builtin_sve_svqabs_s16_z(__VA_ARGS__)
+#define svqadd_n_s8_m(...) __builtin_sve_svqadd_n_s8_m(__VA_ARGS__)
+#define svqadd_n_s32_m(...) __builtin_sve_svqadd_n_s32_m(__VA_ARGS__)
+#define svqadd_n_s64_m(...) __builtin_sve_svqadd_n_s64_m(__VA_ARGS__)
+#define svqadd_n_s16_m(...) __builtin_sve_svqadd_n_s16_m(__VA_ARGS__)
+#define svqadd_n_s8_x(...) __builtin_sve_svqadd_n_s8_x(__VA_ARGS__)
+#define svqadd_n_s32_x(...) __builtin_sve_svqadd_n_s32_x(__VA_ARGS__)
+#define svqadd_n_s64_x(...) __builtin_sve_svqadd_n_s64_x(__VA_ARGS__)
+#define svqadd_n_s16_x(...) __builtin_sve_svqadd_n_s16_x(__VA_ARGS__)
+#define svqadd_n_s8_z(...) __builtin_sve_svqadd_n_s8_z(__VA_ARGS__)
+#define svqadd_n_s32_z(...) __builtin_sve_svqadd_n_s32_z(__VA_ARGS__)
+#define svqadd_n_s64_z(...) __builtin_sve_svqadd_n_s64_z(__VA_ARGS__)
+#define svqadd_n_s16_z(...) __builtin_sve_svqadd_n_s16_z(__VA_ARGS__)
+#define svqadd_n_u8_m(...) __builtin_sve_svqadd_n_u8_m(__VA_ARGS__)
+#define svqadd_n_u32_m(...) __builtin_sve_svqadd_n_u32_m(__VA_ARGS__)
+#define svqadd_n_u64_m(...) __builtin_sve_svqadd_n_u64_m(__VA_ARGS__)
+#define svqadd_n_u16_m(...) __builtin_sve_svqadd_n_u16_m(__VA_ARGS__)
+#define svqadd_n_u8_x(...) __builtin_sve_svqadd_n_u8_x(__VA_ARGS__)
+#define svqadd_n_u32_x(...) __builtin_sve_svqadd_n_u32_x(__VA_ARGS__)
+#define svqadd_n_u64_x(...) __builtin_sve_svqadd_n_u64_x(__VA_ARGS__)
+#define svqadd_n_u16_x(...) __builtin_sve_svqadd_n_u16_x(__VA_ARGS__)
+#define svqadd_n_u8_z(...) __builtin_sve_svqadd_n_u8_z(__VA_ARGS__)
+#define svqadd_n_u32_z(...) __builtin_sve_svqadd_n_u32_z(__VA_ARGS__)
+#define svqadd_n_u64_z(...) __builtin_sve_svqadd_n_u64_z(__VA_ARGS__)
+#define svqadd_n_u16_z(...) __builtin_sve_svqadd_n_u16_z(__VA_ARGS__)
+#define svqadd_s8_m(...) __builtin_sve_svqadd_s8_m(__VA_ARGS__)
+#define svqadd_s32_m(...) __builtin_sve_svqadd_s32_m(__VA_ARGS__)
+#define svqadd_s64_m(...) __builtin_sve_svqadd_s64_m(__VA_ARGS__)
+#define svqadd_s16_m(...) __builtin_sve_svqadd_s16_m(__VA_ARGS__)
+#define svqadd_s8_x(...) __builtin_sve_svqadd_s8_x(__VA_ARGS__)
+#define svqadd_s32_x(...) __builtin_sve_svqadd_s32_x(__VA_ARGS__)
+#define svqadd_s64_x(...) __builtin_sve_svqadd_s64_x(__VA_ARGS__)
+#define svqadd_s16_x(...) __builtin_sve_svqadd_s16_x(__VA_ARGS__)
+#define svqadd_s8_z(...) __builtin_sve_svqadd_s8_z(__VA_ARGS__)
+#define svqadd_s32_z(...) __builtin_sve_svqadd_s32_z(__VA_ARGS__)
+#define svqadd_s64_z(...) __builtin_sve_svqadd_s64_z(__VA_ARGS__)
+#define svqadd_s16_z(...) __builtin_sve_svqadd_s16_z(__VA_ARGS__)
+#define svqadd_u8_m(...) __builtin_sve_svqadd_u8_m(__VA_ARGS__)
+#define svqadd_u32_m(...) __builtin_sve_svqadd_u32_m(__VA_ARGS__)
+#define svqadd_u64_m(...) __builtin_sve_svqadd_u64_m(__VA_ARGS__)
+#define svqadd_u16_m(...) __builtin_sve_svqadd_u16_m(__VA_ARGS__)
+#define svqadd_u8_x(...) __builtin_sve_svqadd_u8_x(__VA_ARGS__)
+#define svqadd_u32_x(...) __builtin_sve_svqadd_u32_x(__VA_ARGS__)
+#define svqadd_u64_x(...) __builtin_sve_svqadd_u64_x(__VA_ARGS__)
+#define svqadd_u16_x(...) __builtin_sve_svqadd_u16_x(__VA_ARGS__)
+#define svqadd_u8_z(...) __builtin_sve_svqadd_u8_z(__VA_ARGS__)
+#define svqadd_u32_z(...) __builtin_sve_svqadd_u32_z(__VA_ARGS__)
+#define svqadd_u64_z(...) __builtin_sve_svqadd_u64_z(__VA_ARGS__)
+#define svqadd_u16_z(...) __builtin_sve_svqadd_u16_z(__VA_ARGS__)
+#define svqcadd_s8(...) __builtin_sve_svqcadd_s8(__VA_ARGS__)
+#define svqcadd_s32(...) __builtin_sve_svqcadd_s32(__VA_ARGS__)
+#define svqcadd_s64(...) __builtin_sve_svqcadd_s64(__VA_ARGS__)
+#define svqcadd_s16(...) __builtin_sve_svqcadd_s16(__VA_ARGS__)
+#define svqdmlalb_n_s32(...) __builtin_sve_svqdmlalb_n_s32(__VA_ARGS__)
+#define svqdmlalb_n_s64(...) __builtin_sve_svqdmlalb_n_s64(__VA_ARGS__)
+#define svqdmlalb_n_s16(...) __builtin_sve_svqdmlalb_n_s16(__VA_ARGS__)
+#define svqdmlalb_s32(...) __builtin_sve_svqdmlalb_s32(__VA_ARGS__)
+#define svqdmlalb_s64(...) __builtin_sve_svqdmlalb_s64(__VA_ARGS__)
+#define svqdmlalb_s16(...) __builtin_sve_svqdmlalb_s16(__VA_ARGS__)
+#define svqdmlalb_lane_s32(...) __builtin_sve_svqdmlalb_lane_s32(__VA_ARGS__)
+#define svqdmlalb_lane_s64(...) __builtin_sve_svqdmlalb_lane_s64(__VA_ARGS__)
+#define svqdmlalbt_n_s32(...) __builtin_sve_svqdmlalbt_n_s32(__VA_ARGS__)
+#define svqdmlalbt_n_s64(...) __builtin_sve_svqdmlalbt_n_s64(__VA_ARGS__)
+#define svqdmlalbt_n_s16(...) __builtin_sve_svqdmlalbt_n_s16(__VA_ARGS__)
+#define svqdmlalbt_s32(...) __builtin_sve_svqdmlalbt_s32(__VA_ARGS__)
+#define svqdmlalbt_s64(...) __builtin_sve_svqdmlalbt_s64(__VA_ARGS__)
+#define svqdmlalbt_s16(...) __builtin_sve_svqdmlalbt_s16(__VA_ARGS__)
+#define svqdmlalt_n_s32(...) __builtin_sve_svqdmlalt_n_s32(__VA_ARGS__)
+#define svqdmlalt_n_s64(...) __builtin_sve_svqdmlalt_n_s64(__VA_ARGS__)
+#define svqdmlalt_n_s16(...) __builtin_sve_svqdmlalt_n_s16(__VA_ARGS__)
+#define svqdmlalt_s32(...) __builtin_sve_svqdmlalt_s32(__VA_ARGS__)
+#define svqdmlalt_s64(...) __builtin_sve_svqdmlalt_s64(__VA_ARGS__)
+#define svqdmlalt_s16(...) __builtin_sve_svqdmlalt_s16(__VA_ARGS__)
+#define svqdmlalt_lane_s32(...) __builtin_sve_svqdmlalt_lane_s32(__VA_ARGS__)
+#define svqdmlalt_lane_s64(...) __builtin_sve_svqdmlalt_lane_s64(__VA_ARGS__)
+#define svqdmlslb_n_s32(...) __builtin_sve_svqdmlslb_n_s32(__VA_ARGS__)
+#define svqdmlslb_n_s64(...) __builtin_sve_svqdmlslb_n_s64(__VA_ARGS__)
+#define svqdmlslb_n_s16(...) __builtin_sve_svqdmlslb_n_s16(__VA_ARGS__)
+#define svqdmlslb_s32(...) __builtin_sve_svqdmlslb_s32(__VA_ARGS__)
+#define svqdmlslb_s64(...) __builtin_sve_svqdmlslb_s64(__VA_ARGS__)
+#define svqdmlslb_s16(...) __builtin_sve_svqdmlslb_s16(__VA_ARGS__)
+#define svqdmlslb_lane_s32(...) __builtin_sve_svqdmlslb_lane_s32(__VA_ARGS__)
+#define svqdmlslb_lane_s64(...) __builtin_sve_svqdmlslb_lane_s64(__VA_ARGS__)
+#define svqdmlslbt_n_s32(...) __builtin_sve_svqdmlslbt_n_s32(__VA_ARGS__)
+#define svqdmlslbt_n_s64(...) __builtin_sve_svqdmlslbt_n_s64(__VA_ARGS__)
+#define svqdmlslbt_n_s16(...) __builtin_sve_svqdmlslbt_n_s16(__VA_ARGS__)
+#define svqdmlslbt_s32(...) __builtin_sve_svqdmlslbt_s32(__VA_ARGS__)
+#define svqdmlslbt_s64(...) __builtin_sve_svqdmlslbt_s64(__VA_ARGS__)
+#define svqdmlslbt_s16(...) __builtin_sve_svqdmlslbt_s16(__VA_ARGS__)
+#define svqdmlslt_n_s32(...) __builtin_sve_svqdmlslt_n_s32(__VA_ARGS__)
+#define svqdmlslt_n_s64(...) __builtin_sve_svqdmlslt_n_s64(__VA_ARGS__)
+#define svqdmlslt_n_s16(...) __builtin_sve_svqdmlslt_n_s16(__VA_ARGS__)
+#define svqdmlslt_s32(...) __builtin_sve_svqdmlslt_s32(__VA_ARGS__)
+#define svqdmlslt_s64(...) __builtin_sve_svqdmlslt_s64(__VA_ARGS__)
+#define svqdmlslt_s16(...) __builtin_sve_svqdmlslt_s16(__VA_ARGS__)
+#define svqdmlslt_lane_s32(...) __builtin_sve_svqdmlslt_lane_s32(__VA_ARGS__)
+#define svqdmlslt_lane_s64(...) __builtin_sve_svqdmlslt_lane_s64(__VA_ARGS__)
+#define svqdmulh_n_s8(...) __builtin_sve_svqdmulh_n_s8(__VA_ARGS__)
+#define svqdmulh_n_s32(...) __builtin_sve_svqdmulh_n_s32(__VA_ARGS__)
+#define svqdmulh_n_s64(...) __builtin_sve_svqdmulh_n_s64(__VA_ARGS__)
+#define svqdmulh_n_s16(...) __builtin_sve_svqdmulh_n_s16(__VA_ARGS__)
+#define svqdmulh_s8(...) __builtin_sve_svqdmulh_s8(__VA_ARGS__)
+#define svqdmulh_s32(...) __builtin_sve_svqdmulh_s32(__VA_ARGS__)
+#define svqdmulh_s64(...) __builtin_sve_svqdmulh_s64(__VA_ARGS__)
+#define svqdmulh_s16(...) __builtin_sve_svqdmulh_s16(__VA_ARGS__)
+#define svqdmulh_lane_s32(...) __builtin_sve_svqdmulh_lane_s32(__VA_ARGS__)
+#define svqdmulh_lane_s64(...) __builtin_sve_svqdmulh_lane_s64(__VA_ARGS__)
+#define svqdmulh_lane_s16(...) __builtin_sve_svqdmulh_lane_s16(__VA_ARGS__)
+#define svqdmullb_n_s32(...) __builtin_sve_svqdmullb_n_s32(__VA_ARGS__)
+#define svqdmullb_n_s64(...) __builtin_sve_svqdmullb_n_s64(__VA_ARGS__)
+#define svqdmullb_n_s16(...) __builtin_sve_svqdmullb_n_s16(__VA_ARGS__)
+#define svqdmullb_s32(...) __builtin_sve_svqdmullb_s32(__VA_ARGS__)
+#define svqdmullb_s64(...) __builtin_sve_svqdmullb_s64(__VA_ARGS__)
+#define svqdmullb_s16(...) __builtin_sve_svqdmullb_s16(__VA_ARGS__)
+#define svqdmullb_lane_s32(...) __builtin_sve_svqdmullb_lane_s32(__VA_ARGS__)
+#define svqdmullb_lane_s64(...) __builtin_sve_svqdmullb_lane_s64(__VA_ARGS__)
+#define svqdmullt_n_s32(...) __builtin_sve_svqdmullt_n_s32(__VA_ARGS__)
+#define svqdmullt_n_s64(...) __builtin_sve_svqdmullt_n_s64(__VA_ARGS__)
+#define svqdmullt_n_s16(...) __builtin_sve_svqdmullt_n_s16(__VA_ARGS__)
+#define svqdmullt_s32(...) __builtin_sve_svqdmullt_s32(__VA_ARGS__)
+#define svqdmullt_s64(...) __builtin_sve_svqdmullt_s64(__VA_ARGS__)
+#define svqdmullt_s16(...) __builtin_sve_svqdmullt_s16(__VA_ARGS__)
+#define svqdmullt_lane_s32(...) __builtin_sve_svqdmullt_lane_s32(__VA_ARGS__)
+#define svqdmullt_lane_s64(...) __builtin_sve_svqdmullt_lane_s64(__VA_ARGS__)
+#define svqneg_s8_m(...) __builtin_sve_svqneg_s8_m(__VA_ARGS__)
+#define svqneg_s32_m(...) __builtin_sve_svqneg_s32_m(__VA_ARGS__)
+#define svqneg_s64_m(...) __builtin_sve_svqneg_s64_m(__VA_ARGS__)
+#define svqneg_s16_m(...) __builtin_sve_svqneg_s16_m(__VA_ARGS__)
+#define svqneg_s8_x(...) __builtin_sve_svqneg_s8_x(__VA_ARGS__)
+#define svqneg_s32_x(...) __builtin_sve_svqneg_s32_x(__VA_ARGS__)
+#define svqneg_s64_x(...) __builtin_sve_svqneg_s64_x(__VA_ARGS__)
+#define svqneg_s16_x(...) __builtin_sve_svqneg_s16_x(__VA_ARGS__)
+#define svqneg_s8_z(...) __builtin_sve_svqneg_s8_z(__VA_ARGS__)
+#define svqneg_s32_z(...) __builtin_sve_svqneg_s32_z(__VA_ARGS__)
+#define svqneg_s64_z(...) __builtin_sve_svqneg_s64_z(__VA_ARGS__)
+#define svqneg_s16_z(...) __builtin_sve_svqneg_s16_z(__VA_ARGS__)
+#define svqrdcmlah_s8(...) __builtin_sve_svqrdcmlah_s8(__VA_ARGS__)
+#define svqrdcmlah_s32(...) __builtin_sve_svqrdcmlah_s32(__VA_ARGS__)
+#define svqrdcmlah_s64(...) __builtin_sve_svqrdcmlah_s64(__VA_ARGS__)
+#define svqrdcmlah_s16(...) __builtin_sve_svqrdcmlah_s16(__VA_ARGS__)
+#define svqrdcmlah_lane_s32(...) __builtin_sve_svqrdcmlah_lane_s32(__VA_ARGS__)
+#define svqrdcmlah_lane_s16(...) __builtin_sve_svqrdcmlah_lane_s16(__VA_ARGS__)
+#define svqrdmlah_n_s8(...) __builtin_sve_svqrdmlah_n_s8(__VA_ARGS__)
+#define svqrdmlah_n_s32(...) __builtin_sve_svqrdmlah_n_s32(__VA_ARGS__)
+#define svqrdmlah_n_s64(...) __builtin_sve_svqrdmlah_n_s64(__VA_ARGS__)
+#define svqrdmlah_n_s16(...) __builtin_sve_svqrdmlah_n_s16(__VA_ARGS__)
+#define svqrdmlah_s8(...) __builtin_sve_svqrdmlah_s8(__VA_ARGS__)
+#define svqrdmlah_s32(...) __builtin_sve_svqrdmlah_s32(__VA_ARGS__)
+#define svqrdmlah_s64(...) __builtin_sve_svqrdmlah_s64(__VA_ARGS__)
+#define svqrdmlah_s16(...) __builtin_sve_svqrdmlah_s16(__VA_ARGS__)
+#define svqrdmlah_lane_s32(...) __builtin_sve_svqrdmlah_lane_s32(__VA_ARGS__)
+#define svqrdmlah_lane_s64(...) __builtin_sve_svqrdmlah_lane_s64(__VA_ARGS__)
+#define svqrdmlah_lane_s16(...) __builtin_sve_svqrdmlah_lane_s16(__VA_ARGS__)
+#define svqrdmlsh_n_s8(...) __builtin_sve_svqrdmlsh_n_s8(__VA_ARGS__)
+#define svqrdmlsh_n_s32(...) __builtin_sve_svqrdmlsh_n_s32(__VA_ARGS__)
+#define svqrdmlsh_n_s64(...) __builtin_sve_svqrdmlsh_n_s64(__VA_ARGS__)
+#define svqrdmlsh_n_s16(...) __builtin_sve_svqrdmlsh_n_s16(__VA_ARGS__)
+#define svqrdmlsh_s8(...) __builtin_sve_svqrdmlsh_s8(__VA_ARGS__)
+#define svqrdmlsh_s32(...) __builtin_sve_svqrdmlsh_s32(__VA_ARGS__)
+#define svqrdmlsh_s64(...) __builtin_sve_svqrdmlsh_s64(__VA_ARGS__)
+#define svqrdmlsh_s16(...) __builtin_sve_svqrdmlsh_s16(__VA_ARGS__)
+#define svqrdmlsh_lane_s32(...) __builtin_sve_svqrdmlsh_lane_s32(__VA_ARGS__)
+#define svqrdmlsh_lane_s64(...) __builtin_sve_svqrdmlsh_lane_s64(__VA_ARGS__)
+#define svqrdmlsh_lane_s16(...) __builtin_sve_svqrdmlsh_lane_s16(__VA_ARGS__)
+#define svqrdmulh_n_s8(...) __builtin_sve_svqrdmulh_n_s8(__VA_ARGS__)
+#define svqrdmulh_n_s32(...) __builtin_sve_svqrdmulh_n_s32(__VA_ARGS__)
+#define svqrdmulh_n_s64(...) __builtin_sve_svqrdmulh_n_s64(__VA_ARGS__)
+#define svqrdmulh_n_s16(...) __builtin_sve_svqrdmulh_n_s16(__VA_ARGS__)
+#define svqrdmulh_s8(...) __builtin_sve_svqrdmulh_s8(__VA_ARGS__)
+#define svqrdmulh_s32(...) __builtin_sve_svqrdmulh_s32(__VA_ARGS__)
+#define svqrdmulh_s64(...) __builtin_sve_svqrdmulh_s64(__VA_ARGS__)
+#define svqrdmulh_s16(...) __builtin_sve_svqrdmulh_s16(__VA_ARGS__)
+#define svqrdmulh_lane_s32(...) __builtin_sve_svqrdmulh_lane_s32(__VA_ARGS__)
+#define svqrdmulh_lane_s64(...) __builtin_sve_svqrdmulh_lane_s64(__VA_ARGS__)
+#define svqrdmulh_lane_s16(...) __builtin_sve_svqrdmulh_lane_s16(__VA_ARGS__)
+#define svqrshl_n_s8_m(...) __builtin_sve_svqrshl_n_s8_m(__VA_ARGS__)
+#define svqrshl_n_s32_m(...) __builtin_sve_svqrshl_n_s32_m(__VA_ARGS__)
+#define svqrshl_n_s64_m(...) __builtin_sve_svqrshl_n_s64_m(__VA_ARGS__)
+#define svqrshl_n_s16_m(...) __builtin_sve_svqrshl_n_s16_m(__VA_ARGS__)
+#define svqrshl_n_s8_x(...) __builtin_sve_svqrshl_n_s8_x(__VA_ARGS__)
+#define svqrshl_n_s32_x(...) __builtin_sve_svqrshl_n_s32_x(__VA_ARGS__)
+#define svqrshl_n_s64_x(...) __builtin_sve_svqrshl_n_s64_x(__VA_ARGS__)
+#define svqrshl_n_s16_x(...) __builtin_sve_svqrshl_n_s16_x(__VA_ARGS__)
+#define svqrshl_n_s8_z(...) __builtin_sve_svqrshl_n_s8_z(__VA_ARGS__)
+#define svqrshl_n_s32_z(...) __builtin_sve_svqrshl_n_s32_z(__VA_ARGS__)
+#define svqrshl_n_s64_z(...) __builtin_sve_svqrshl_n_s64_z(__VA_ARGS__)
+#define svqrshl_n_s16_z(...) __builtin_sve_svqrshl_n_s16_z(__VA_ARGS__)
+#define svqrshl_n_u8_m(...) __builtin_sve_svqrshl_n_u8_m(__VA_ARGS__)
+#define svqrshl_n_u32_m(...) __builtin_sve_svqrshl_n_u32_m(__VA_ARGS__)
+#define svqrshl_n_u64_m(...) __builtin_sve_svqrshl_n_u64_m(__VA_ARGS__)
+#define svqrshl_n_u16_m(...) __builtin_sve_svqrshl_n_u16_m(__VA_ARGS__)
+#define svqrshl_n_u8_x(...) __builtin_sve_svqrshl_n_u8_x(__VA_ARGS__)
+#define svqrshl_n_u32_x(...) __builtin_sve_svqrshl_n_u32_x(__VA_ARGS__)
+#define svqrshl_n_u64_x(...) __builtin_sve_svqrshl_n_u64_x(__VA_ARGS__)
+#define svqrshl_n_u16_x(...) __builtin_sve_svqrshl_n_u16_x(__VA_ARGS__)
+#define svqrshl_n_u8_z(...) __builtin_sve_svqrshl_n_u8_z(__VA_ARGS__)
+#define svqrshl_n_u32_z(...) __builtin_sve_svqrshl_n_u32_z(__VA_ARGS__)
+#define svqrshl_n_u64_z(...) __builtin_sve_svqrshl_n_u64_z(__VA_ARGS__)
+#define svqrshl_n_u16_z(...) __builtin_sve_svqrshl_n_u16_z(__VA_ARGS__)
+#define svqrshl_s8_m(...) __builtin_sve_svqrshl_s8_m(__VA_ARGS__)
+#define svqrshl_s32_m(...) __builtin_sve_svqrshl_s32_m(__VA_ARGS__)
+#define svqrshl_s64_m(...) __builtin_sve_svqrshl_s64_m(__VA_ARGS__)
+#define svqrshl_s16_m(...) __builtin_sve_svqrshl_s16_m(__VA_ARGS__)
+#define svqrshl_s8_x(...) __builtin_sve_svqrshl_s8_x(__VA_ARGS__)
+#define svqrshl_s32_x(...) __builtin_sve_svqrshl_s32_x(__VA_ARGS__)
+#define svqrshl_s64_x(...) __builtin_sve_svqrshl_s64_x(__VA_ARGS__)
+#define svqrshl_s16_x(...) __builtin_sve_svqrshl_s16_x(__VA_ARGS__)
+#define svqrshl_s8_z(...) __builtin_sve_svqrshl_s8_z(__VA_ARGS__)
+#define svqrshl_s32_z(...) __builtin_sve_svqrshl_s32_z(__VA_ARGS__)
+#define svqrshl_s64_z(...) __builtin_sve_svqrshl_s64_z(__VA_ARGS__)
+#define svqrshl_s16_z(...) __builtin_sve_svqrshl_s16_z(__VA_ARGS__)
+#define svqrshl_u8_m(...) __builtin_sve_svqrshl_u8_m(__VA_ARGS__)
+#define svqrshl_u32_m(...) __builtin_sve_svqrshl_u32_m(__VA_ARGS__)
+#define svqrshl_u64_m(...) __builtin_sve_svqrshl_u64_m(__VA_ARGS__)
+#define svqrshl_u16_m(...) __builtin_sve_svqrshl_u16_m(__VA_ARGS__)
+#define svqrshl_u8_x(...) __builtin_sve_svqrshl_u8_x(__VA_ARGS__)
+#define svqrshl_u32_x(...) __builtin_sve_svqrshl_u32_x(__VA_ARGS__)
+#define svqrshl_u64_x(...) __builtin_sve_svqrshl_u64_x(__VA_ARGS__)
+#define svqrshl_u16_x(...) __builtin_sve_svqrshl_u16_x(__VA_ARGS__)
+#define svqrshl_u8_z(...) __builtin_sve_svqrshl_u8_z(__VA_ARGS__)
+#define svqrshl_u32_z(...) __builtin_sve_svqrshl_u32_z(__VA_ARGS__)
+#define svqrshl_u64_z(...) __builtin_sve_svqrshl_u64_z(__VA_ARGS__)
+#define svqrshl_u16_z(...) __builtin_sve_svqrshl_u16_z(__VA_ARGS__)
+#define svqrshrnb_n_s32(...) __builtin_sve_svqrshrnb_n_s32(__VA_ARGS__)
+#define svqrshrnb_n_s64(...) __builtin_sve_svqrshrnb_n_s64(__VA_ARGS__)
+#define svqrshrnb_n_s16(...) __builtin_sve_svqrshrnb_n_s16(__VA_ARGS__)
+#define svqrshrnb_n_u32(...) __builtin_sve_svqrshrnb_n_u32(__VA_ARGS__)
+#define svqrshrnb_n_u64(...) __builtin_sve_svqrshrnb_n_u64(__VA_ARGS__)
+#define svqrshrnb_n_u16(...) __builtin_sve_svqrshrnb_n_u16(__VA_ARGS__)
+#define svqrshrnt_n_s32(...) __builtin_sve_svqrshrnt_n_s32(__VA_ARGS__)
+#define svqrshrnt_n_s64(...) __builtin_sve_svqrshrnt_n_s64(__VA_ARGS__)
+#define svqrshrnt_n_s16(...) __builtin_sve_svqrshrnt_n_s16(__VA_ARGS__)
+#define svqrshrnt_n_u32(...) __builtin_sve_svqrshrnt_n_u32(__VA_ARGS__)
+#define svqrshrnt_n_u64(...) __builtin_sve_svqrshrnt_n_u64(__VA_ARGS__)
+#define svqrshrnt_n_u16(...) __builtin_sve_svqrshrnt_n_u16(__VA_ARGS__)
+#define svqrshrunb_n_s32(...) __builtin_sve_svqrshrunb_n_s32(__VA_ARGS__)
+#define svqrshrunb_n_s64(...) __builtin_sve_svqrshrunb_n_s64(__VA_ARGS__)
+#define svqrshrunb_n_s16(...) __builtin_sve_svqrshrunb_n_s16(__VA_ARGS__)
+#define svqrshrunt_n_s32(...) __builtin_sve_svqrshrunt_n_s32(__VA_ARGS__)
+#define svqrshrunt_n_s64(...) __builtin_sve_svqrshrunt_n_s64(__VA_ARGS__)
+#define svqrshrunt_n_s16(...) __builtin_sve_svqrshrunt_n_s16(__VA_ARGS__)
+#define svqshl_n_s8_m(...) __builtin_sve_svqshl_n_s8_m(__VA_ARGS__)
+#define svqshl_n_s32_m(...) __builtin_sve_svqshl_n_s32_m(__VA_ARGS__)
+#define svqshl_n_s64_m(...) __builtin_sve_svqshl_n_s64_m(__VA_ARGS__)
+#define svqshl_n_s16_m(...) __builtin_sve_svqshl_n_s16_m(__VA_ARGS__)
+#define svqshl_n_s8_x(...) __builtin_sve_svqshl_n_s8_x(__VA_ARGS__)
+#define svqshl_n_s32_x(...) __builtin_sve_svqshl_n_s32_x(__VA_ARGS__)
+#define svqshl_n_s64_x(...) __builtin_sve_svqshl_n_s64_x(__VA_ARGS__)
+#define svqshl_n_s16_x(...) __builtin_sve_svqshl_n_s16_x(__VA_ARGS__)
+#define svqshl_n_s8_z(...) __builtin_sve_svqshl_n_s8_z(__VA_ARGS__)
+#define svqshl_n_s32_z(...) __builtin_sve_svqshl_n_s32_z(__VA_ARGS__)
+#define svqshl_n_s64_z(...) __builtin_sve_svqshl_n_s64_z(__VA_ARGS__)
+#define svqshl_n_s16_z(...) __builtin_sve_svqshl_n_s16_z(__VA_ARGS__)
+#define svqshl_n_u8_m(...) __builtin_sve_svqshl_n_u8_m(__VA_ARGS__)
+#define svqshl_n_u32_m(...) __builtin_sve_svqshl_n_u32_m(__VA_ARGS__)
+#define svqshl_n_u64_m(...) __builtin_sve_svqshl_n_u64_m(__VA_ARGS__)
+#define svqshl_n_u16_m(...) __builtin_sve_svqshl_n_u16_m(__VA_ARGS__)
+#define svqshl_n_u8_x(...) __builtin_sve_svqshl_n_u8_x(__VA_ARGS__)
+#define svqshl_n_u32_x(...) __builtin_sve_svqshl_n_u32_x(__VA_ARGS__)
+#define svqshl_n_u64_x(...) __builtin_sve_svqshl_n_u64_x(__VA_ARGS__)
+#define svqshl_n_u16_x(...) __builtin_sve_svqshl_n_u16_x(__VA_ARGS__)
+#define svqshl_n_u8_z(...) __builtin_sve_svqshl_n_u8_z(__VA_ARGS__)
+#define svqshl_n_u32_z(...) __builtin_sve_svqshl_n_u32_z(__VA_ARGS__)
+#define svqshl_n_u64_z(...) __builtin_sve_svqshl_n_u64_z(__VA_ARGS__)
+#define svqshl_n_u16_z(...) __builtin_sve_svqshl_n_u16_z(__VA_ARGS__)
+#define svqshl_s8_m(...) __builtin_sve_svqshl_s8_m(__VA_ARGS__)
+#define svqshl_s32_m(...) __builtin_sve_svqshl_s32_m(__VA_ARGS__)
+#define svqshl_s64_m(...) __builtin_sve_svqshl_s64_m(__VA_ARGS__)
+#define svqshl_s16_m(...) __builtin_sve_svqshl_s16_m(__VA_ARGS__)
+#define svqshl_s8_x(...) __builtin_sve_svqshl_s8_x(__VA_ARGS__)
+#define svqshl_s32_x(...) __builtin_sve_svqshl_s32_x(__VA_ARGS__)
+#define svqshl_s64_x(...) __builtin_sve_svqshl_s64_x(__VA_ARGS__)
+#define svqshl_s16_x(...) __builtin_sve_svqshl_s16_x(__VA_ARGS__)
+#define svqshl_s8_z(...) __builtin_sve_svqshl_s8_z(__VA_ARGS__)
+#define svqshl_s32_z(...) __builtin_sve_svqshl_s32_z(__VA_ARGS__)
+#define svqshl_s64_z(...) __builtin_sve_svqshl_s64_z(__VA_ARGS__)
+#define svqshl_s16_z(...) __builtin_sve_svqshl_s16_z(__VA_ARGS__)
+#define svqshl_u8_m(...) __builtin_sve_svqshl_u8_m(__VA_ARGS__)
+#define svqshl_u32_m(...) __builtin_sve_svqshl_u32_m(__VA_ARGS__)
+#define svqshl_u64_m(...) __builtin_sve_svqshl_u64_m(__VA_ARGS__)
+#define svqshl_u16_m(...) __builtin_sve_svqshl_u16_m(__VA_ARGS__)
+#define svqshl_u8_x(...) __builtin_sve_svqshl_u8_x(__VA_ARGS__)
+#define svqshl_u32_x(...) __builtin_sve_svqshl_u32_x(__VA_ARGS__)
+#define svqshl_u64_x(...) __builtin_sve_svqshl_u64_x(__VA_ARGS__)
+#define svqshl_u16_x(...) __builtin_sve_svqshl_u16_x(__VA_ARGS__)
+#define svqshl_u8_z(...) __builtin_sve_svqshl_u8_z(__VA_ARGS__)
+#define svqshl_u32_z(...) __builtin_sve_svqshl_u32_z(__VA_ARGS__)
+#define svqshl_u64_z(...) __builtin_sve_svqshl_u64_z(__VA_ARGS__)
+#define svqshl_u16_z(...) __builtin_sve_svqshl_u16_z(__VA_ARGS__)
+#define svqshlu_n_s8_m(...) __builtin_sve_svqshlu_n_s8_m(__VA_ARGS__)
+#define svqshlu_n_s32_m(...) __builtin_sve_svqshlu_n_s32_m(__VA_ARGS__)
+#define svqshlu_n_s64_m(...) __builtin_sve_svqshlu_n_s64_m(__VA_ARGS__)
+#define svqshlu_n_s16_m(...) __builtin_sve_svqshlu_n_s16_m(__VA_ARGS__)
+#define svqshlu_n_s8_x(...) __builtin_sve_svqshlu_n_s8_x(__VA_ARGS__)
+#define svqshlu_n_s32_x(...) __builtin_sve_svqshlu_n_s32_x(__VA_ARGS__)
+#define svqshlu_n_s64_x(...) __builtin_sve_svqshlu_n_s64_x(__VA_ARGS__)
+#define svqshlu_n_s16_x(...) __builtin_sve_svqshlu_n_s16_x(__VA_ARGS__)
+#define svqshlu_n_s8_z(...) __builtin_sve_svqshlu_n_s8_z(__VA_ARGS__)
+#define svqshlu_n_s32_z(...) __builtin_sve_svqshlu_n_s32_z(__VA_ARGS__)
+#define svqshlu_n_s64_z(...) __builtin_sve_svqshlu_n_s64_z(__VA_ARGS__)
+#define svqshlu_n_s16_z(...) __builtin_sve_svqshlu_n_s16_z(__VA_ARGS__)
+#define svqshrnb_n_s32(...) __builtin_sve_svqshrnb_n_s32(__VA_ARGS__)
+#define svqshrnb_n_s64(...) __builtin_sve_svqshrnb_n_s64(__VA_ARGS__)
+#define svqshrnb_n_s16(...) __builtin_sve_svqshrnb_n_s16(__VA_ARGS__)
+#define svqshrnb_n_u32(...) __builtin_sve_svqshrnb_n_u32(__VA_ARGS__)
+#define svqshrnb_n_u64(...) __builtin_sve_svqshrnb_n_u64(__VA_ARGS__)
+#define svqshrnb_n_u16(...) __builtin_sve_svqshrnb_n_u16(__VA_ARGS__)
+#define svqshrnt_n_s32(...) __builtin_sve_svqshrnt_n_s32(__VA_ARGS__)
+#define svqshrnt_n_s64(...) __builtin_sve_svqshrnt_n_s64(__VA_ARGS__)
+#define svqshrnt_n_s16(...) __builtin_sve_svqshrnt_n_s16(__VA_ARGS__)
+#define svqshrnt_n_u32(...) __builtin_sve_svqshrnt_n_u32(__VA_ARGS__)
+#define svqshrnt_n_u64(...) __builtin_sve_svqshrnt_n_u64(__VA_ARGS__)
+#define svqshrnt_n_u16(...) __builtin_sve_svqshrnt_n_u16(__VA_ARGS__)
+#define svqshrunb_n_s32(...) __builtin_sve_svqshrunb_n_s32(__VA_ARGS__)
+#define svqshrunb_n_s64(...) __builtin_sve_svqshrunb_n_s64(__VA_ARGS__)
+#define svqshrunb_n_s16(...) __builtin_sve_svqshrunb_n_s16(__VA_ARGS__)
+#define svqshrunt_n_s32(...) __builtin_sve_svqshrunt_n_s32(__VA_ARGS__)
+#define svqshrunt_n_s64(...) __builtin_sve_svqshrunt_n_s64(__VA_ARGS__)
+#define svqshrunt_n_s16(...) __builtin_sve_svqshrunt_n_s16(__VA_ARGS__)
+#define svqsub_n_s8_m(...) __builtin_sve_svqsub_n_s8_m(__VA_ARGS__)
+#define svqsub_n_s32_m(...) __builtin_sve_svqsub_n_s32_m(__VA_ARGS__)
+#define svqsub_n_s64_m(...) __builtin_sve_svqsub_n_s64_m(__VA_ARGS__)
+#define svqsub_n_s16_m(...) __builtin_sve_svqsub_n_s16_m(__VA_ARGS__)
+#define svqsub_n_s8_x(...) __builtin_sve_svqsub_n_s8_x(__VA_ARGS__)
+#define svqsub_n_s32_x(...) __builtin_sve_svqsub_n_s32_x(__VA_ARGS__)
+#define svqsub_n_s64_x(...) __builtin_sve_svqsub_n_s64_x(__VA_ARGS__)
+#define svqsub_n_s16_x(...) __builtin_sve_svqsub_n_s16_x(__VA_ARGS__)
+#define svqsub_n_s8_z(...) __builtin_sve_svqsub_n_s8_z(__VA_ARGS__)
+#define svqsub_n_s32_z(...) __builtin_sve_svqsub_n_s32_z(__VA_ARGS__)
+#define svqsub_n_s64_z(...) __builtin_sve_svqsub_n_s64_z(__VA_ARGS__)
+#define svqsub_n_s16_z(...) __builtin_sve_svqsub_n_s16_z(__VA_ARGS__)
+#define svqsub_n_u8_m(...) __builtin_sve_svqsub_n_u8_m(__VA_ARGS__)
+#define svqsub_n_u32_m(...) __builtin_sve_svqsub_n_u32_m(__VA_ARGS__)
+#define svqsub_n_u64_m(...) __builtin_sve_svqsub_n_u64_m(__VA_ARGS__)
+#define svqsub_n_u16_m(...) __builtin_sve_svqsub_n_u16_m(__VA_ARGS__)
+#define svqsub_n_u8_x(...) __builtin_sve_svqsub_n_u8_x(__VA_ARGS__)
+#define svqsub_n_u32_x(...) __builtin_sve_svqsub_n_u32_x(__VA_ARGS__)
+#define svqsub_n_u64_x(...) __builtin_sve_svqsub_n_u64_x(__VA_ARGS__)
+#define svqsub_n_u16_x(...) __builtin_sve_svqsub_n_u16_x(__VA_ARGS__)
+#define svqsub_n_u8_z(...) __builtin_sve_svqsub_n_u8_z(__VA_ARGS__)
+#define svqsub_n_u32_z(...) __builtin_sve_svqsub_n_u32_z(__VA_ARGS__)
+#define svqsub_n_u64_z(...) __builtin_sve_svqsub_n_u64_z(__VA_ARGS__)
+#define svqsub_n_u16_z(...) __builtin_sve_svqsub_n_u16_z(__VA_ARGS__)
+#define svqsub_s8_m(...) __builtin_sve_svqsub_s8_m(__VA_ARGS__)
+#define svqsub_s32_m(...) __builtin_sve_svqsub_s32_m(__VA_ARGS__)
+#define svqsub_s64_m(...) __builtin_sve_svqsub_s64_m(__VA_ARGS__)
+#define svqsub_s16_m(...) __builtin_sve_svqsub_s16_m(__VA_ARGS__)
+#define svqsub_s8_x(...) __builtin_sve_svqsub_s8_x(__VA_ARGS__)
+#define svqsub_s32_x(...) __builtin_sve_svqsub_s32_x(__VA_ARGS__)
+#define svqsub_s64_x(...) __builtin_sve_svqsub_s64_x(__VA_ARGS__)
+#define svqsub_s16_x(...) __builtin_sve_svqsub_s16_x(__VA_ARGS__)
+#define svqsub_s8_z(...) __builtin_sve_svqsub_s8_z(__VA_ARGS__)
+#define svqsub_s32_z(...) __builtin_sve_svqsub_s32_z(__VA_ARGS__)
+#define svqsub_s64_z(...) __builtin_sve_svqsub_s64_z(__VA_ARGS__)
+#define svqsub_s16_z(...) __builtin_sve_svqsub_s16_z(__VA_ARGS__)
+#define svqsub_u8_m(...) __builtin_sve_svqsub_u8_m(__VA_ARGS__)
+#define svqsub_u32_m(...) __builtin_sve_svqsub_u32_m(__VA_ARGS__)
+#define svqsub_u64_m(...) __builtin_sve_svqsub_u64_m(__VA_ARGS__)
+#define svqsub_u16_m(...) __builtin_sve_svqsub_u16_m(__VA_ARGS__)
+#define svqsub_u8_x(...) __builtin_sve_svqsub_u8_x(__VA_ARGS__)
+#define svqsub_u32_x(...) __builtin_sve_svqsub_u32_x(__VA_ARGS__)
+#define svqsub_u64_x(...) __builtin_sve_svqsub_u64_x(__VA_ARGS__)
+#define svqsub_u16_x(...) __builtin_sve_svqsub_u16_x(__VA_ARGS__)
+#define svqsub_u8_z(...) __builtin_sve_svqsub_u8_z(__VA_ARGS__)
+#define svqsub_u32_z(...) __builtin_sve_svqsub_u32_z(__VA_ARGS__)
+#define svqsub_u64_z(...) __builtin_sve_svqsub_u64_z(__VA_ARGS__)
+#define svqsub_u16_z(...) __builtin_sve_svqsub_u16_z(__VA_ARGS__)
+#define svqsubr_n_s8_m(...) __builtin_sve_svqsubr_n_s8_m(__VA_ARGS__)
+#define svqsubr_n_s32_m(...) __builtin_sve_svqsubr_n_s32_m(__VA_ARGS__)
+#define svqsubr_n_s64_m(...) __builtin_sve_svqsubr_n_s64_m(__VA_ARGS__)
+#define svqsubr_n_s16_m(...) __builtin_sve_svqsubr_n_s16_m(__VA_ARGS__)
+#define svqsubr_n_s8_x(...) __builtin_sve_svqsubr_n_s8_x(__VA_ARGS__)
+#define svqsubr_n_s32_x(...) __builtin_sve_svqsubr_n_s32_x(__VA_ARGS__)
+#define svqsubr_n_s64_x(...) __builtin_sve_svqsubr_n_s64_x(__VA_ARGS__)
+#define svqsubr_n_s16_x(...) __builtin_sve_svqsubr_n_s16_x(__VA_ARGS__)
+#define svqsubr_n_s8_z(...) __builtin_sve_svqsubr_n_s8_z(__VA_ARGS__)
+#define svqsubr_n_s32_z(...) __builtin_sve_svqsubr_n_s32_z(__VA_ARGS__)
+#define svqsubr_n_s64_z(...) __builtin_sve_svqsubr_n_s64_z(__VA_ARGS__)
+#define svqsubr_n_s16_z(...) __builtin_sve_svqsubr_n_s16_z(__VA_ARGS__)
+#define svqsubr_n_u8_m(...) __builtin_sve_svqsubr_n_u8_m(__VA_ARGS__)
+#define svqsubr_n_u32_m(...) __builtin_sve_svqsubr_n_u32_m(__VA_ARGS__)
+#define svqsubr_n_u64_m(...) __builtin_sve_svqsubr_n_u64_m(__VA_ARGS__)
+#define svqsubr_n_u16_m(...) __builtin_sve_svqsubr_n_u16_m(__VA_ARGS__)
+#define svqsubr_n_u8_x(...) __builtin_sve_svqsubr_n_u8_x(__VA_ARGS__)
+#define svqsubr_n_u32_x(...) __builtin_sve_svqsubr_n_u32_x(__VA_ARGS__)
+#define svqsubr_n_u64_x(...) __builtin_sve_svqsubr_n_u64_x(__VA_ARGS__)
+#define svqsubr_n_u16_x(...) __builtin_sve_svqsubr_n_u16_x(__VA_ARGS__)
+#define svqsubr_n_u8_z(...) __builtin_sve_svqsubr_n_u8_z(__VA_ARGS__)
+#define svqsubr_n_u32_z(...) __builtin_sve_svqsubr_n_u32_z(__VA_ARGS__)
+#define svqsubr_n_u64_z(...) __builtin_sve_svqsubr_n_u64_z(__VA_ARGS__)
+#define svqsubr_n_u16_z(...) __builtin_sve_svqsubr_n_u16_z(__VA_ARGS__)
+#define svqsubr_s8_m(...) __builtin_sve_svqsubr_s8_m(__VA_ARGS__)
+#define svqsubr_s32_m(...) __builtin_sve_svqsubr_s32_m(__VA_ARGS__)
+#define svqsubr_s64_m(...) __builtin_sve_svqsubr_s64_m(__VA_ARGS__)
+#define svqsubr_s16_m(...) __builtin_sve_svqsubr_s16_m(__VA_ARGS__)
+#define svqsubr_s8_x(...) __builtin_sve_svqsubr_s8_x(__VA_ARGS__)
+#define svqsubr_s32_x(...) __builtin_sve_svqsubr_s32_x(__VA_ARGS__)
+#define svqsubr_s64_x(...) __builtin_sve_svqsubr_s64_x(__VA_ARGS__)
+#define svqsubr_s16_x(...) __builtin_sve_svqsubr_s16_x(__VA_ARGS__)
+#define svqsubr_s8_z(...) __builtin_sve_svqsubr_s8_z(__VA_ARGS__)
+#define svqsubr_s32_z(...) __builtin_sve_svqsubr_s32_z(__VA_ARGS__)
+#define svqsubr_s64_z(...) __builtin_sve_svqsubr_s64_z(__VA_ARGS__)
+#define svqsubr_s16_z(...) __builtin_sve_svqsubr_s16_z(__VA_ARGS__)
+#define svqsubr_u8_m(...) __builtin_sve_svqsubr_u8_m(__VA_ARGS__)
+#define svqsubr_u32_m(...) __builtin_sve_svqsubr_u32_m(__VA_ARGS__)
+#define svqsubr_u64_m(...) __builtin_sve_svqsubr_u64_m(__VA_ARGS__)
+#define svqsubr_u16_m(...) __builtin_sve_svqsubr_u16_m(__VA_ARGS__)
+#define svqsubr_u8_x(...) __builtin_sve_svqsubr_u8_x(__VA_ARGS__)
+#define svqsubr_u32_x(...) __builtin_sve_svqsubr_u32_x(__VA_ARGS__)
+#define svqsubr_u64_x(...) __builtin_sve_svqsubr_u64_x(__VA_ARGS__)
+#define svqsubr_u16_x(...) __builtin_sve_svqsubr_u16_x(__VA_ARGS__)
+#define svqsubr_u8_z(...) __builtin_sve_svqsubr_u8_z(__VA_ARGS__)
+#define svqsubr_u32_z(...) __builtin_sve_svqsubr_u32_z(__VA_ARGS__)
+#define svqsubr_u64_z(...) __builtin_sve_svqsubr_u64_z(__VA_ARGS__)
+#define svqsubr_u16_z(...) __builtin_sve_svqsubr_u16_z(__VA_ARGS__)
+#define svqxtnb_s32(...) __builtin_sve_svqxtnb_s32(__VA_ARGS__)
+#define svqxtnb_s64(...) __builtin_sve_svqxtnb_s64(__VA_ARGS__)
+#define svqxtnb_s16(...) __builtin_sve_svqxtnb_s16(__VA_ARGS__)
+#define svqxtnb_u32(...) __builtin_sve_svqxtnb_u32(__VA_ARGS__)
+#define svqxtnb_u64(...) __builtin_sve_svqxtnb_u64(__VA_ARGS__)
+#define svqxtnb_u16(...) __builtin_sve_svqxtnb_u16(__VA_ARGS__)
+#define svqxtnt_s32(...) __builtin_sve_svqxtnt_s32(__VA_ARGS__)
+#define svqxtnt_s64(...) __builtin_sve_svqxtnt_s64(__VA_ARGS__)
+#define svqxtnt_s16(...) __builtin_sve_svqxtnt_s16(__VA_ARGS__)
+#define svqxtnt_u32(...) __builtin_sve_svqxtnt_u32(__VA_ARGS__)
+#define svqxtnt_u64(...) __builtin_sve_svqxtnt_u64(__VA_ARGS__)
+#define svqxtnt_u16(...) __builtin_sve_svqxtnt_u16(__VA_ARGS__)
+#define svqxtunb_s32(...) __builtin_sve_svqxtunb_s32(__VA_ARGS__)
+#define svqxtunb_s64(...) __builtin_sve_svqxtunb_s64(__VA_ARGS__)
+#define svqxtunb_s16(...) __builtin_sve_svqxtunb_s16(__VA_ARGS__)
+#define svqxtunt_s32(...) __builtin_sve_svqxtunt_s32(__VA_ARGS__)
+#define svqxtunt_s64(...) __builtin_sve_svqxtunt_s64(__VA_ARGS__)
+#define svqxtunt_s16(...) __builtin_sve_svqxtunt_s16(__VA_ARGS__)
+#define svraddhnb_n_u32(...) __builtin_sve_svraddhnb_n_u32(__VA_ARGS__)
+#define svraddhnb_n_u64(...) __builtin_sve_svraddhnb_n_u64(__VA_ARGS__)
+#define svraddhnb_n_u16(...) __builtin_sve_svraddhnb_n_u16(__VA_ARGS__)
+#define svraddhnb_n_s32(...) __builtin_sve_svraddhnb_n_s32(__VA_ARGS__)
+#define svraddhnb_n_s64(...) __builtin_sve_svraddhnb_n_s64(__VA_ARGS__)
+#define svraddhnb_n_s16(...) __builtin_sve_svraddhnb_n_s16(__VA_ARGS__)
+#define svraddhnb_u32(...) __builtin_sve_svraddhnb_u32(__VA_ARGS__)
+#define svraddhnb_u64(...) __builtin_sve_svraddhnb_u64(__VA_ARGS__)
+#define svraddhnb_u16(...) __builtin_sve_svraddhnb_u16(__VA_ARGS__)
+#define svraddhnb_s32(...) __builtin_sve_svraddhnb_s32(__VA_ARGS__)
+#define svraddhnb_s64(...) __builtin_sve_svraddhnb_s64(__VA_ARGS__)
+#define svraddhnb_s16(...) __builtin_sve_svraddhnb_s16(__VA_ARGS__)
+#define svraddhnt_n_u32(...) __builtin_sve_svraddhnt_n_u32(__VA_ARGS__)
+#define svraddhnt_n_u64(...) __builtin_sve_svraddhnt_n_u64(__VA_ARGS__)
+#define svraddhnt_n_u16(...) __builtin_sve_svraddhnt_n_u16(__VA_ARGS__)
+#define svraddhnt_n_s32(...) __builtin_sve_svraddhnt_n_s32(__VA_ARGS__)
+#define svraddhnt_n_s64(...) __builtin_sve_svraddhnt_n_s64(__VA_ARGS__)
+#define svraddhnt_n_s16(...) __builtin_sve_svraddhnt_n_s16(__VA_ARGS__)
+#define svraddhnt_u32(...) __builtin_sve_svraddhnt_u32(__VA_ARGS__)
+#define svraddhnt_u64(...) __builtin_sve_svraddhnt_u64(__VA_ARGS__)
+#define svraddhnt_u16(...) __builtin_sve_svraddhnt_u16(__VA_ARGS__)
+#define svraddhnt_s32(...) __builtin_sve_svraddhnt_s32(__VA_ARGS__)
+#define svraddhnt_s64(...) __builtin_sve_svraddhnt_s64(__VA_ARGS__)
+#define svraddhnt_s16(...) __builtin_sve_svraddhnt_s16(__VA_ARGS__)
+#define svrecpe_u32_m(...) __builtin_sve_svrecpe_u32_m(__VA_ARGS__)
+#define svrecpe_u32_x(...) __builtin_sve_svrecpe_u32_x(__VA_ARGS__)
+#define svrecpe_u32_z(...) __builtin_sve_svrecpe_u32_z(__VA_ARGS__)
+#define svrhadd_n_s8_m(...) __builtin_sve_svrhadd_n_s8_m(__VA_ARGS__)
+#define svrhadd_n_s32_m(...) __builtin_sve_svrhadd_n_s32_m(__VA_ARGS__)
+#define svrhadd_n_s64_m(...) __builtin_sve_svrhadd_n_s64_m(__VA_ARGS__)
+#define svrhadd_n_s16_m(...) __builtin_sve_svrhadd_n_s16_m(__VA_ARGS__)
+#define svrhadd_n_s8_x(...) __builtin_sve_svrhadd_n_s8_x(__VA_ARGS__)
+#define svrhadd_n_s32_x(...) __builtin_sve_svrhadd_n_s32_x(__VA_ARGS__)
+#define svrhadd_n_s64_x(...) __builtin_sve_svrhadd_n_s64_x(__VA_ARGS__)
+#define svrhadd_n_s16_x(...) __builtin_sve_svrhadd_n_s16_x(__VA_ARGS__)
+#define svrhadd_n_s8_z(...) __builtin_sve_svrhadd_n_s8_z(__VA_ARGS__)
+#define svrhadd_n_s32_z(...) __builtin_sve_svrhadd_n_s32_z(__VA_ARGS__)
+#define svrhadd_n_s64_z(...) __builtin_sve_svrhadd_n_s64_z(__VA_ARGS__)
+#define svrhadd_n_s16_z(...) __builtin_sve_svrhadd_n_s16_z(__VA_ARGS__)
+#define svrhadd_n_u8_m(...) __builtin_sve_svrhadd_n_u8_m(__VA_ARGS__)
+#define svrhadd_n_u32_m(...) __builtin_sve_svrhadd_n_u32_m(__VA_ARGS__)
+#define svrhadd_n_u64_m(...) __builtin_sve_svrhadd_n_u64_m(__VA_ARGS__)
+#define svrhadd_n_u16_m(...) __builtin_sve_svrhadd_n_u16_m(__VA_ARGS__)
+#define svrhadd_n_u8_x(...) __builtin_sve_svrhadd_n_u8_x(__VA_ARGS__)
+#define svrhadd_n_u32_x(...) __builtin_sve_svrhadd_n_u32_x(__VA_ARGS__)
+#define svrhadd_n_u64_x(...) __builtin_sve_svrhadd_n_u64_x(__VA_ARGS__)
+#define svrhadd_n_u16_x(...) __builtin_sve_svrhadd_n_u16_x(__VA_ARGS__)
+#define svrhadd_n_u8_z(...) __builtin_sve_svrhadd_n_u8_z(__VA_ARGS__)
+#define svrhadd_n_u32_z(...) __builtin_sve_svrhadd_n_u32_z(__VA_ARGS__)
+#define svrhadd_n_u64_z(...) __builtin_sve_svrhadd_n_u64_z(__VA_ARGS__)
+#define svrhadd_n_u16_z(...) __builtin_sve_svrhadd_n_u16_z(__VA_ARGS__)
+#define svrhadd_s8_m(...) __builtin_sve_svrhadd_s8_m(__VA_ARGS__)
+#define svrhadd_s32_m(...) __builtin_sve_svrhadd_s32_m(__VA_ARGS__)
+#define svrhadd_s64_m(...) __builtin_sve_svrhadd_s64_m(__VA_ARGS__)
+#define svrhadd_s16_m(...) __builtin_sve_svrhadd_s16_m(__VA_ARGS__)
+#define svrhadd_s8_x(...) __builtin_sve_svrhadd_s8_x(__VA_ARGS__)
+#define svrhadd_s32_x(...) __builtin_sve_svrhadd_s32_x(__VA_ARGS__)
+#define svrhadd_s64_x(...) __builtin_sve_svrhadd_s64_x(__VA_ARGS__)
+#define svrhadd_s16_x(...) __builtin_sve_svrhadd_s16_x(__VA_ARGS__)
+#define svrhadd_s8_z(...) __builtin_sve_svrhadd_s8_z(__VA_ARGS__)
+#define svrhadd_s32_z(...) __builtin_sve_svrhadd_s32_z(__VA_ARGS__)
+#define svrhadd_s64_z(...) __builtin_sve_svrhadd_s64_z(__VA_ARGS__)
+#define svrhadd_s16_z(...) __builtin_sve_svrhadd_s16_z(__VA_ARGS__)
+#define svrhadd_u8_m(...) __builtin_sve_svrhadd_u8_m(__VA_ARGS__)
+#define svrhadd_u32_m(...) __builtin_sve_svrhadd_u32_m(__VA_ARGS__)
+#define svrhadd_u64_m(...) __builtin_sve_svrhadd_u64_m(__VA_ARGS__)
+#define svrhadd_u16_m(...) __builtin_sve_svrhadd_u16_m(__VA_ARGS__)
+#define svrhadd_u8_x(...) __builtin_sve_svrhadd_u8_x(__VA_ARGS__)
+#define svrhadd_u32_x(...) __builtin_sve_svrhadd_u32_x(__VA_ARGS__)
+#define svrhadd_u64_x(...) __builtin_sve_svrhadd_u64_x(__VA_ARGS__)
+#define svrhadd_u16_x(...) __builtin_sve_svrhadd_u16_x(__VA_ARGS__)
+#define svrhadd_u8_z(...) __builtin_sve_svrhadd_u8_z(__VA_ARGS__)
+#define svrhadd_u32_z(...) __builtin_sve_svrhadd_u32_z(__VA_ARGS__)
+#define svrhadd_u64_z(...) __builtin_sve_svrhadd_u64_z(__VA_ARGS__)
+#define svrhadd_u16_z(...) __builtin_sve_svrhadd_u16_z(__VA_ARGS__)
+#define svrshl_n_s8_m(...) __builtin_sve_svrshl_n_s8_m(__VA_ARGS__)
+#define svrshl_n_s32_m(...) __builtin_sve_svrshl_n_s32_m(__VA_ARGS__)
+#define svrshl_n_s64_m(...) __builtin_sve_svrshl_n_s64_m(__VA_ARGS__)
+#define svrshl_n_s16_m(...) __builtin_sve_svrshl_n_s16_m(__VA_ARGS__)
+#define svrshl_n_s8_x(...) __builtin_sve_svrshl_n_s8_x(__VA_ARGS__)
+#define svrshl_n_s32_x(...) __builtin_sve_svrshl_n_s32_x(__VA_ARGS__)
+#define svrshl_n_s64_x(...) __builtin_sve_svrshl_n_s64_x(__VA_ARGS__)
+#define svrshl_n_s16_x(...) __builtin_sve_svrshl_n_s16_x(__VA_ARGS__)
+#define svrshl_n_s8_z(...) __builtin_sve_svrshl_n_s8_z(__VA_ARGS__)
+#define svrshl_n_s32_z(...) __builtin_sve_svrshl_n_s32_z(__VA_ARGS__)
+#define svrshl_n_s64_z(...) __builtin_sve_svrshl_n_s64_z(__VA_ARGS__)
+#define svrshl_n_s16_z(...) __builtin_sve_svrshl_n_s16_z(__VA_ARGS__)
+#define svrshl_n_u8_m(...) __builtin_sve_svrshl_n_u8_m(__VA_ARGS__)
+#define svrshl_n_u32_m(...) __builtin_sve_svrshl_n_u32_m(__VA_ARGS__)
+#define svrshl_n_u64_m(...) __builtin_sve_svrshl_n_u64_m(__VA_ARGS__)
+#define svrshl_n_u16_m(...) __builtin_sve_svrshl_n_u16_m(__VA_ARGS__)
+#define svrshl_n_u8_x(...) __builtin_sve_svrshl_n_u8_x(__VA_ARGS__)
+#define svrshl_n_u32_x(...) __builtin_sve_svrshl_n_u32_x(__VA_ARGS__)
+#define svrshl_n_u64_x(...) __builtin_sve_svrshl_n_u64_x(__VA_ARGS__)
+#define svrshl_n_u16_x(...) __builtin_sve_svrshl_n_u16_x(__VA_ARGS__)
+#define svrshl_n_u8_z(...) __builtin_sve_svrshl_n_u8_z(__VA_ARGS__)
+#define svrshl_n_u32_z(...) __builtin_sve_svrshl_n_u32_z(__VA_ARGS__)
+#define svrshl_n_u64_z(...) __builtin_sve_svrshl_n_u64_z(__VA_ARGS__)
+#define svrshl_n_u16_z(...) __builtin_sve_svrshl_n_u16_z(__VA_ARGS__)
+#define svrshl_s8_m(...) __builtin_sve_svrshl_s8_m(__VA_ARGS__)
+#define svrshl_s32_m(...) __builtin_sve_svrshl_s32_m(__VA_ARGS__)
+#define svrshl_s64_m(...) __builtin_sve_svrshl_s64_m(__VA_ARGS__)
+#define svrshl_s16_m(...) __builtin_sve_svrshl_s16_m(__VA_ARGS__)
+#define svrshl_s8_x(...) __builtin_sve_svrshl_s8_x(__VA_ARGS__)
+#define svrshl_s32_x(...) __builtin_sve_svrshl_s32_x(__VA_ARGS__)
+#define svrshl_s64_x(...) __builtin_sve_svrshl_s64_x(__VA_ARGS__)
+#define svrshl_s16_x(...) __builtin_sve_svrshl_s16_x(__VA_ARGS__)
+#define svrshl_s8_z(...) __builtin_sve_svrshl_s8_z(__VA_ARGS__)
+#define svrshl_s32_z(...) __builtin_sve_svrshl_s32_z(__VA_ARGS__)
+#define svrshl_s64_z(...) __builtin_sve_svrshl_s64_z(__VA_ARGS__)
+#define svrshl_s16_z(...) __builtin_sve_svrshl_s16_z(__VA_ARGS__)
+#define svrshl_u8_m(...) __builtin_sve_svrshl_u8_m(__VA_ARGS__)
+#define svrshl_u32_m(...) __builtin_sve_svrshl_u32_m(__VA_ARGS__)
+#define svrshl_u64_m(...) __builtin_sve_svrshl_u64_m(__VA_ARGS__)
+#define svrshl_u16_m(...) __builtin_sve_svrshl_u16_m(__VA_ARGS__)
+#define svrshl_u8_x(...) __builtin_sve_svrshl_u8_x(__VA_ARGS__)
+#define svrshl_u32_x(...) __builtin_sve_svrshl_u32_x(__VA_ARGS__)
+#define svrshl_u64_x(...) __builtin_sve_svrshl_u64_x(__VA_ARGS__)
+#define svrshl_u16_x(...) __builtin_sve_svrshl_u16_x(__VA_ARGS__)
+#define svrshl_u8_z(...) __builtin_sve_svrshl_u8_z(__VA_ARGS__)
+#define svrshl_u32_z(...) __builtin_sve_svrshl_u32_z(__VA_ARGS__)
+#define svrshl_u64_z(...) __builtin_sve_svrshl_u64_z(__VA_ARGS__)
+#define svrshl_u16_z(...) __builtin_sve_svrshl_u16_z(__VA_ARGS__)
+#define svrshr_n_s8_m(...) __builtin_sve_svrshr_n_s8_m(__VA_ARGS__)
+#define svrshr_n_s32_m(...) __builtin_sve_svrshr_n_s32_m(__VA_ARGS__)
+#define svrshr_n_s64_m(...) __builtin_sve_svrshr_n_s64_m(__VA_ARGS__)
+#define svrshr_n_s16_m(...) __builtin_sve_svrshr_n_s16_m(__VA_ARGS__)
+#define svrshr_n_u8_m(...) __builtin_sve_svrshr_n_u8_m(__VA_ARGS__)
+#define svrshr_n_u32_m(...) __builtin_sve_svrshr_n_u32_m(__VA_ARGS__)
+#define svrshr_n_u64_m(...) __builtin_sve_svrshr_n_u64_m(__VA_ARGS__)
+#define svrshr_n_u16_m(...) __builtin_sve_svrshr_n_u16_m(__VA_ARGS__)
+#define svrshr_n_s8_x(...) __builtin_sve_svrshr_n_s8_x(__VA_ARGS__)
+#define svrshr_n_s32_x(...) __builtin_sve_svrshr_n_s32_x(__VA_ARGS__)
+#define svrshr_n_s64_x(...) __builtin_sve_svrshr_n_s64_x(__VA_ARGS__)
+#define svrshr_n_s16_x(...) __builtin_sve_svrshr_n_s16_x(__VA_ARGS__)
+#define svrshr_n_u8_x(...) __builtin_sve_svrshr_n_u8_x(__VA_ARGS__)
+#define svrshr_n_u32_x(...) __builtin_sve_svrshr_n_u32_x(__VA_ARGS__)
+#define svrshr_n_u64_x(...) __builtin_sve_svrshr_n_u64_x(__VA_ARGS__)
+#define svrshr_n_u16_x(...) __builtin_sve_svrshr_n_u16_x(__VA_ARGS__)
+#define svrshr_n_s8_z(...) __builtin_sve_svrshr_n_s8_z(__VA_ARGS__)
+#define svrshr_n_s32_z(...) __builtin_sve_svrshr_n_s32_z(__VA_ARGS__)
+#define svrshr_n_s64_z(...) __builtin_sve_svrshr_n_s64_z(__VA_ARGS__)
+#define svrshr_n_s16_z(...) __builtin_sve_svrshr_n_s16_z(__VA_ARGS__)
+#define svrshr_n_u8_z(...) __builtin_sve_svrshr_n_u8_z(__VA_ARGS__)
+#define svrshr_n_u32_z(...) __builtin_sve_svrshr_n_u32_z(__VA_ARGS__)
+#define svrshr_n_u64_z(...) __builtin_sve_svrshr_n_u64_z(__VA_ARGS__)
+#define svrshr_n_u16_z(...) __builtin_sve_svrshr_n_u16_z(__VA_ARGS__)
+#define svrshrnb_n_u32(...) __builtin_sve_svrshrnb_n_u32(__VA_ARGS__)
+#define svrshrnb_n_u64(...) __builtin_sve_svrshrnb_n_u64(__VA_ARGS__)
+#define svrshrnb_n_u16(...) __builtin_sve_svrshrnb_n_u16(__VA_ARGS__)
+#define svrshrnb_n_s32(...) __builtin_sve_svrshrnb_n_s32(__VA_ARGS__)
+#define svrshrnb_n_s64(...) __builtin_sve_svrshrnb_n_s64(__VA_ARGS__)
+#define svrshrnb_n_s16(...) __builtin_sve_svrshrnb_n_s16(__VA_ARGS__)
+#define svrshrnt_n_u32(...) __builtin_sve_svrshrnt_n_u32(__VA_ARGS__)
+#define svrshrnt_n_u64(...) __builtin_sve_svrshrnt_n_u64(__VA_ARGS__)
+#define svrshrnt_n_u16(...) __builtin_sve_svrshrnt_n_u16(__VA_ARGS__)
+#define svrshrnt_n_s32(...) __builtin_sve_svrshrnt_n_s32(__VA_ARGS__)
+#define svrshrnt_n_s64(...) __builtin_sve_svrshrnt_n_s64(__VA_ARGS__)
+#define svrshrnt_n_s16(...) __builtin_sve_svrshrnt_n_s16(__VA_ARGS__)
+#define svrsqrte_u32_m(...) __builtin_sve_svrsqrte_u32_m(__VA_ARGS__)
+#define svrsqrte_u32_x(...) __builtin_sve_svrsqrte_u32_x(__VA_ARGS__)
+#define svrsqrte_u32_z(...) __builtin_sve_svrsqrte_u32_z(__VA_ARGS__)
+#define svrsra_n_s8(...) __builtin_sve_svrsra_n_s8(__VA_ARGS__)
+#define svrsra_n_s32(...) __builtin_sve_svrsra_n_s32(__VA_ARGS__)
+#define svrsra_n_s64(...) __builtin_sve_svrsra_n_s64(__VA_ARGS__)
+#define svrsra_n_s16(...) __builtin_sve_svrsra_n_s16(__VA_ARGS__)
+#define svrsra_n_u8(...) __builtin_sve_svrsra_n_u8(__VA_ARGS__)
+#define svrsra_n_u32(...) __builtin_sve_svrsra_n_u32(__VA_ARGS__)
+#define svrsra_n_u64(...) __builtin_sve_svrsra_n_u64(__VA_ARGS__)
+#define svrsra_n_u16(...) __builtin_sve_svrsra_n_u16(__VA_ARGS__)
+#define svrsubhnb_n_u32(...) __builtin_sve_svrsubhnb_n_u32(__VA_ARGS__)
+#define svrsubhnb_n_u64(...) __builtin_sve_svrsubhnb_n_u64(__VA_ARGS__)
+#define svrsubhnb_n_u16(...) __builtin_sve_svrsubhnb_n_u16(__VA_ARGS__)
+#define svrsubhnb_n_s32(...) __builtin_sve_svrsubhnb_n_s32(__VA_ARGS__)
+#define svrsubhnb_n_s64(...) __builtin_sve_svrsubhnb_n_s64(__VA_ARGS__)
+#define svrsubhnb_n_s16(...) __builtin_sve_svrsubhnb_n_s16(__VA_ARGS__)
+#define svrsubhnb_u32(...) __builtin_sve_svrsubhnb_u32(__VA_ARGS__)
+#define svrsubhnb_u64(...) __builtin_sve_svrsubhnb_u64(__VA_ARGS__)
+#define svrsubhnb_u16(...) __builtin_sve_svrsubhnb_u16(__VA_ARGS__)
+#define svrsubhnb_s32(...) __builtin_sve_svrsubhnb_s32(__VA_ARGS__)
+#define svrsubhnb_s64(...) __builtin_sve_svrsubhnb_s64(__VA_ARGS__)
+#define svrsubhnb_s16(...) __builtin_sve_svrsubhnb_s16(__VA_ARGS__)
+#define svrsubhnt_n_u32(...) __builtin_sve_svrsubhnt_n_u32(__VA_ARGS__)
+#define svrsubhnt_n_u64(...) __builtin_sve_svrsubhnt_n_u64(__VA_ARGS__)
+#define svrsubhnt_n_u16(...) __builtin_sve_svrsubhnt_n_u16(__VA_ARGS__)
+#define svrsubhnt_n_s32(...) __builtin_sve_svrsubhnt_n_s32(__VA_ARGS__)
+#define svrsubhnt_n_s64(...) __builtin_sve_svrsubhnt_n_s64(__VA_ARGS__)
+#define svrsubhnt_n_s16(...) __builtin_sve_svrsubhnt_n_s16(__VA_ARGS__)
+#define svrsubhnt_u32(...) __builtin_sve_svrsubhnt_u32(__VA_ARGS__)
+#define svrsubhnt_u64(...) __builtin_sve_svrsubhnt_u64(__VA_ARGS__)
+#define svrsubhnt_u16(...) __builtin_sve_svrsubhnt_u16(__VA_ARGS__)
+#define svrsubhnt_s32(...) __builtin_sve_svrsubhnt_s32(__VA_ARGS__)
+#define svrsubhnt_s64(...) __builtin_sve_svrsubhnt_s64(__VA_ARGS__)
+#define svrsubhnt_s16(...) __builtin_sve_svrsubhnt_s16(__VA_ARGS__)
+#define svsbclb_n_u32(...) __builtin_sve_svsbclb_n_u32(__VA_ARGS__)
+#define svsbclb_n_u64(...) __builtin_sve_svsbclb_n_u64(__VA_ARGS__)
+#define svsbclb_u32(...) __builtin_sve_svsbclb_u32(__VA_ARGS__)
+#define svsbclb_u64(...) __builtin_sve_svsbclb_u64(__VA_ARGS__)
+#define svsbclt_n_u32(...) __builtin_sve_svsbclt_n_u32(__VA_ARGS__)
+#define svsbclt_n_u64(...) __builtin_sve_svsbclt_n_u64(__VA_ARGS__)
+#define svsbclt_u32(...) __builtin_sve_svsbclt_u32(__VA_ARGS__)
+#define svsbclt_u64(...) __builtin_sve_svsbclt_u64(__VA_ARGS__)
+#define svshllb_n_s32(...) __builtin_sve_svshllb_n_s32(__VA_ARGS__)
+#define svshllb_n_s64(...) __builtin_sve_svshllb_n_s64(__VA_ARGS__)
+#define svshllb_n_s16(...) __builtin_sve_svshllb_n_s16(__VA_ARGS__)
+#define svshllb_n_u32(...) __builtin_sve_svshllb_n_u32(__VA_ARGS__)
+#define svshllb_n_u64(...) __builtin_sve_svshllb_n_u64(__VA_ARGS__)
+#define svshllb_n_u16(...) __builtin_sve_svshllb_n_u16(__VA_ARGS__)
+#define svshllt_n_s32(...) __builtin_sve_svshllt_n_s32(__VA_ARGS__)
+#define svshllt_n_s64(...) __builtin_sve_svshllt_n_s64(__VA_ARGS__)
+#define svshllt_n_s16(...) __builtin_sve_svshllt_n_s16(__VA_ARGS__)
+#define svshllt_n_u32(...) __builtin_sve_svshllt_n_u32(__VA_ARGS__)
+#define svshllt_n_u64(...) __builtin_sve_svshllt_n_u64(__VA_ARGS__)
+#define svshllt_n_u16(...) __builtin_sve_svshllt_n_u16(__VA_ARGS__)
+#define svshrnb_n_u32(...) __builtin_sve_svshrnb_n_u32(__VA_ARGS__)
+#define svshrnb_n_u64(...) __builtin_sve_svshrnb_n_u64(__VA_ARGS__)
+#define svshrnb_n_u16(...) __builtin_sve_svshrnb_n_u16(__VA_ARGS__)
+#define svshrnb_n_s32(...) __builtin_sve_svshrnb_n_s32(__VA_ARGS__)
+#define svshrnb_n_s64(...) __builtin_sve_svshrnb_n_s64(__VA_ARGS__)
+#define svshrnb_n_s16(...) __builtin_sve_svshrnb_n_s16(__VA_ARGS__)
+#define svshrnt_n_u32(...) __builtin_sve_svshrnt_n_u32(__VA_ARGS__)
+#define svshrnt_n_u64(...) __builtin_sve_svshrnt_n_u64(__VA_ARGS__)
+#define svshrnt_n_u16(...) __builtin_sve_svshrnt_n_u16(__VA_ARGS__)
+#define svshrnt_n_s32(...) __builtin_sve_svshrnt_n_s32(__VA_ARGS__)
+#define svshrnt_n_s64(...) __builtin_sve_svshrnt_n_s64(__VA_ARGS__)
+#define svshrnt_n_s16(...) __builtin_sve_svshrnt_n_s16(__VA_ARGS__)
+#define svsli_n_u8(...) __builtin_sve_svsli_n_u8(__VA_ARGS__)
+#define svsli_n_u32(...) __builtin_sve_svsli_n_u32(__VA_ARGS__)
+#define svsli_n_u64(...) __builtin_sve_svsli_n_u64(__VA_ARGS__)
+#define svsli_n_u16(...) __builtin_sve_svsli_n_u16(__VA_ARGS__)
+#define svsli_n_s8(...) __builtin_sve_svsli_n_s8(__VA_ARGS__)
+#define svsli_n_s32(...) __builtin_sve_svsli_n_s32(__VA_ARGS__)
+#define svsli_n_s64(...) __builtin_sve_svsli_n_s64(__VA_ARGS__)
+#define svsli_n_s16(...) __builtin_sve_svsli_n_s16(__VA_ARGS__)
+#define svsqadd_n_u8_m(...) __builtin_sve_svsqadd_n_u8_m(__VA_ARGS__)
+#define svsqadd_n_u32_m(...) __builtin_sve_svsqadd_n_u32_m(__VA_ARGS__)
+#define svsqadd_n_u64_m(...) __builtin_sve_svsqadd_n_u64_m(__VA_ARGS__)
+#define svsqadd_n_u16_m(...) __builtin_sve_svsqadd_n_u16_m(__VA_ARGS__)
+#define svsqadd_n_u8_x(...) __builtin_sve_svsqadd_n_u8_x(__VA_ARGS__)
+#define svsqadd_n_u32_x(...) __builtin_sve_svsqadd_n_u32_x(__VA_ARGS__)
+#define svsqadd_n_u64_x(...) __builtin_sve_svsqadd_n_u64_x(__VA_ARGS__)
+#define svsqadd_n_u16_x(...) __builtin_sve_svsqadd_n_u16_x(__VA_ARGS__)
+#define svsqadd_n_u8_z(...) __builtin_sve_svsqadd_n_u8_z(__VA_ARGS__)
+#define svsqadd_n_u32_z(...) __builtin_sve_svsqadd_n_u32_z(__VA_ARGS__)
+#define svsqadd_n_u64_z(...) __builtin_sve_svsqadd_n_u64_z(__VA_ARGS__)
+#define svsqadd_n_u16_z(...) __builtin_sve_svsqadd_n_u16_z(__VA_ARGS__)
+#define svsqadd_u8_m(...) __builtin_sve_svsqadd_u8_m(__VA_ARGS__)
+#define svsqadd_u32_m(...) __builtin_sve_svsqadd_u32_m(__VA_ARGS__)
+#define svsqadd_u64_m(...) __builtin_sve_svsqadd_u64_m(__VA_ARGS__)
+#define svsqadd_u16_m(...) __builtin_sve_svsqadd_u16_m(__VA_ARGS__)
+#define svsqadd_u8_x(...) __builtin_sve_svsqadd_u8_x(__VA_ARGS__)
+#define svsqadd_u32_x(...) __builtin_sve_svsqadd_u32_x(__VA_ARGS__)
+#define svsqadd_u64_x(...) __builtin_sve_svsqadd_u64_x(__VA_ARGS__)
+#define svsqadd_u16_x(...) __builtin_sve_svsqadd_u16_x(__VA_ARGS__)
+#define svsqadd_u8_z(...) __builtin_sve_svsqadd_u8_z(__VA_ARGS__)
+#define svsqadd_u32_z(...) __builtin_sve_svsqadd_u32_z(__VA_ARGS__)
+#define svsqadd_u64_z(...) __builtin_sve_svsqadd_u64_z(__VA_ARGS__)
+#define svsqadd_u16_z(...) __builtin_sve_svsqadd_u16_z(__VA_ARGS__)
+#define svsra_n_s8(...) __builtin_sve_svsra_n_s8(__VA_ARGS__)
+#define svsra_n_s32(...) __builtin_sve_svsra_n_s32(__VA_ARGS__)
+#define svsra_n_s64(...) __builtin_sve_svsra_n_s64(__VA_ARGS__)
+#define svsra_n_s16(...) __builtin_sve_svsra_n_s16(__VA_ARGS__)
+#define svsra_n_u8(...) __builtin_sve_svsra_n_u8(__VA_ARGS__)
+#define svsra_n_u32(...) __builtin_sve_svsra_n_u32(__VA_ARGS__)
+#define svsra_n_u64(...) __builtin_sve_svsra_n_u64(__VA_ARGS__)
+#define svsra_n_u16(...) __builtin_sve_svsra_n_u16(__VA_ARGS__)
+#define svsri_n_u8(...) __builtin_sve_svsri_n_u8(__VA_ARGS__)
+#define svsri_n_u32(...) __builtin_sve_svsri_n_u32(__VA_ARGS__)
+#define svsri_n_u64(...) __builtin_sve_svsri_n_u64(__VA_ARGS__)
+#define svsri_n_u16(...) __builtin_sve_svsri_n_u16(__VA_ARGS__)
+#define svsri_n_s8(...) __builtin_sve_svsri_n_s8(__VA_ARGS__)
+#define svsri_n_s32(...) __builtin_sve_svsri_n_s32(__VA_ARGS__)
+#define svsri_n_s64(...) __builtin_sve_svsri_n_s64(__VA_ARGS__)
+#define svsri_n_s16(...) __builtin_sve_svsri_n_s16(__VA_ARGS__)
+#define svstnt1_scatter_u32base_index_u32(...) __builtin_sve_svstnt1_scatter_u32base_index_u32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_index_u64(...) __builtin_sve_svstnt1_scatter_u64base_index_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64base_index_f64(...) __builtin_sve_svstnt1_scatter_u64base_index_f64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_index_f32(...) __builtin_sve_svstnt1_scatter_u32base_index_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32base_index_s32(...) __builtin_sve_svstnt1_scatter_u32base_index_s32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_index_s64(...) __builtin_sve_svstnt1_scatter_u64base_index_s64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_offset_u32(...) __builtin_sve_svstnt1_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64base_offset_f64(...) __builtin_sve_svstnt1_scatter_u64base_offset_f64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_offset_f32(...) __builtin_sve_svstnt1_scatter_u32base_offset_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32base_offset_s32(...) __builtin_sve_svstnt1_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_u32(...) __builtin_sve_svstnt1_scatter_u32base_u32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_u64(...) __builtin_sve_svstnt1_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64base_f64(...) __builtin_sve_svstnt1_scatter_u64base_f64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_f32(...) __builtin_sve_svstnt1_scatter_u32base_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32base_s32(...) __builtin_sve_svstnt1_scatter_u32base_s32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_s64(...) __builtin_sve_svstnt1_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1_scatter_s64index_u64(...) __builtin_sve_svstnt1_scatter_s64index_u64(__VA_ARGS__)
+#define svstnt1_scatter_s64index_f64(...) __builtin_sve_svstnt1_scatter_s64index_f64(__VA_ARGS__)
+#define svstnt1_scatter_s64index_s64(...) __builtin_sve_svstnt1_scatter_s64index_s64(__VA_ARGS__)
+#define svstnt1_scatter_u64index_u64(...) __builtin_sve_svstnt1_scatter_u64index_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64index_f64(...) __builtin_sve_svstnt1_scatter_u64index_f64(__VA_ARGS__)
+#define svstnt1_scatter_u64index_s64(...) __builtin_sve_svstnt1_scatter_u64index_s64(__VA_ARGS__)
+#define svstnt1_scatter_u32offset_u32(...) __builtin_sve_svstnt1_scatter_u32offset_u32(__VA_ARGS__)
+#define svstnt1_scatter_u32offset_f32(...) __builtin_sve_svstnt1_scatter_u32offset_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32offset_s32(...) __builtin_sve_svstnt1_scatter_u32offset_s32(__VA_ARGS__)
+#define svstnt1_scatter_s64offset_u64(...) __builtin_sve_svstnt1_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1_scatter_s64offset_f64(...) __builtin_sve_svstnt1_scatter_s64offset_f64(__VA_ARGS__)
+#define svstnt1_scatter_s64offset_s64(...) __builtin_sve_svstnt1_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1_scatter_u64offset_u64(...) __builtin_sve_svstnt1_scatter_u64offset_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64offset_f64(...) __builtin_sve_svstnt1_scatter_u64offset_f64(__VA_ARGS__)
+#define svstnt1_scatter_u64offset_s64(...) __builtin_sve_svstnt1_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_offset_u32(...) __builtin_sve_svstnt1b_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1b_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_offset_s32(...) __builtin_sve_svstnt1b_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1b_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_u32(...) __builtin_sve_svstnt1b_scatter_u32base_u32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_u64(...) __builtin_sve_svstnt1b_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_s32(...) __builtin_sve_svstnt1b_scatter_u32base_s32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_s64(...) __builtin_sve_svstnt1b_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u32offset_s32(...) __builtin_sve_svstnt1b_scatter_u32offset_s32(__VA_ARGS__)
+#define svstnt1b_scatter_u32offset_u32(...) __builtin_sve_svstnt1b_scatter_u32offset_u32(__VA_ARGS__)
+#define svstnt1b_scatter_s64offset_s64(...) __builtin_sve_svstnt1b_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_s64offset_u64(...) __builtin_sve_svstnt1b_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1b_scatter_u64offset_s64(...) __builtin_sve_svstnt1b_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u64offset_u64(...) __builtin_sve_svstnt1b_scatter_u64offset_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_index_u32(...) __builtin_sve_svstnt1h_scatter_u32base_index_u32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_index_u64(...) __builtin_sve_svstnt1h_scatter_u64base_index_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_index_s32(...) __builtin_sve_svstnt1h_scatter_u32base_index_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_index_s64(...) __builtin_sve_svstnt1h_scatter_u64base_index_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_offset_u32(...) __builtin_sve_svstnt1h_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1h_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_offset_s32(...) __builtin_sve_svstnt1h_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1h_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_u32(...) __builtin_sve_svstnt1h_scatter_u32base_u32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_u64(...) __builtin_sve_svstnt1h_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_s32(...) __builtin_sve_svstnt1h_scatter_u32base_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_s64(...) __builtin_sve_svstnt1h_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1h_scatter_s64index_s64(...) __builtin_sve_svstnt1h_scatter_s64index_s64(__VA_ARGS__)
+#define svstnt1h_scatter_s64index_u64(...) __builtin_sve_svstnt1h_scatter_s64index_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u64index_s64(...) __builtin_sve_svstnt1h_scatter_u64index_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u64index_u64(...) __builtin_sve_svstnt1h_scatter_u64index_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32offset_s32(...) __builtin_sve_svstnt1h_scatter_u32offset_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u32offset_u32(...) __builtin_sve_svstnt1h_scatter_u32offset_u32(__VA_ARGS__)
+#define svstnt1h_scatter_s64offset_s64(...) __builtin_sve_svstnt1h_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1h_scatter_s64offset_u64(...) __builtin_sve_svstnt1h_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u64offset_s64(...) __builtin_sve_svstnt1h_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u64offset_u64(...) __builtin_sve_svstnt1h_scatter_u64offset_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_index_u64(...) __builtin_sve_svstnt1w_scatter_u64base_index_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_index_s64(...) __builtin_sve_svstnt1w_scatter_u64base_index_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1w_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1w_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_u64(...) __builtin_sve_svstnt1w_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_s64(...) __builtin_sve_svstnt1w_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1w_scatter_s64index_s64(...) __builtin_sve_svstnt1w_scatter_s64index_s64(__VA_ARGS__)
+#define svstnt1w_scatter_s64index_u64(...) __builtin_sve_svstnt1w_scatter_s64index_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64index_s64(...) __builtin_sve_svstnt1w_scatter_u64index_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64index_u64(...) __builtin_sve_svstnt1w_scatter_u64index_u64(__VA_ARGS__)
+#define svstnt1w_scatter_s64offset_s64(...) __builtin_sve_svstnt1w_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1w_scatter_s64offset_u64(...) __builtin_sve_svstnt1w_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64offset_s64(...) __builtin_sve_svstnt1w_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64offset_u64(...) __builtin_sve_svstnt1w_scatter_u64offset_u64(__VA_ARGS__)
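+/* The svstnt1*_scatter_* macros above cover SVE2's non-temporal scatter
+ * stores; the infix names the addressing mode (u32/u64 vector base,
+ * base plus byte offsets, or base plus indices scaled by element size),
+ * and svstnt1b/h/w are the truncating forms. A minimal sketch, assuming
+ * a build with SVE2 enabled (e.g. -march=armv8-a+sve2); the function
+ * and variable names here are illustrative, not part of this header:
+ *
+ *   void scatter64(uint64_t *base, svuint64_t idx, svuint64_t data) {
+ *     svbool_t pg = svptrue_b64();
+ *     // stores data[i] to base + idx[i]*8 with a non-temporal hint
+ *     svstnt1_scatter_u64index_u64(pg, base, idx, data);
+ *   }
+ */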
+#define svsubhnb_n_u32(...) __builtin_sve_svsubhnb_n_u32(__VA_ARGS__)
+#define svsubhnb_n_u64(...) __builtin_sve_svsubhnb_n_u64(__VA_ARGS__)
+#define svsubhnb_n_u16(...) __builtin_sve_svsubhnb_n_u16(__VA_ARGS__)
+#define svsubhnb_n_s32(...) __builtin_sve_svsubhnb_n_s32(__VA_ARGS__)
+#define svsubhnb_n_s64(...) __builtin_sve_svsubhnb_n_s64(__VA_ARGS__)
+#define svsubhnb_n_s16(...) __builtin_sve_svsubhnb_n_s16(__VA_ARGS__)
+#define svsubhnb_u32(...) __builtin_sve_svsubhnb_u32(__VA_ARGS__)
+#define svsubhnb_u64(...) __builtin_sve_svsubhnb_u64(__VA_ARGS__)
+#define svsubhnb_u16(...) __builtin_sve_svsubhnb_u16(__VA_ARGS__)
+#define svsubhnb_s32(...) __builtin_sve_svsubhnb_s32(__VA_ARGS__)
+#define svsubhnb_s64(...) __builtin_sve_svsubhnb_s64(__VA_ARGS__)
+#define svsubhnb_s16(...) __builtin_sve_svsubhnb_s16(__VA_ARGS__)
+#define svsubhnt_n_u32(...) __builtin_sve_svsubhnt_n_u32(__VA_ARGS__)
+#define svsubhnt_n_u64(...) __builtin_sve_svsubhnt_n_u64(__VA_ARGS__)
+#define svsubhnt_n_u16(...) __builtin_sve_svsubhnt_n_u16(__VA_ARGS__)
+#define svsubhnt_n_s32(...) __builtin_sve_svsubhnt_n_s32(__VA_ARGS__)
+#define svsubhnt_n_s64(...) __builtin_sve_svsubhnt_n_s64(__VA_ARGS__)
+#define svsubhnt_n_s16(...) __builtin_sve_svsubhnt_n_s16(__VA_ARGS__)
+#define svsubhnt_u32(...) __builtin_sve_svsubhnt_u32(__VA_ARGS__)
+#define svsubhnt_u64(...) __builtin_sve_svsubhnt_u64(__VA_ARGS__)
+#define svsubhnt_u16(...) __builtin_sve_svsubhnt_u16(__VA_ARGS__)
+#define svsubhnt_s32(...) __builtin_sve_svsubhnt_s32(__VA_ARGS__)
+#define svsubhnt_s64(...) __builtin_sve_svsubhnt_s64(__VA_ARGS__)
+#define svsubhnt_s16(...) __builtin_sve_svsubhnt_s16(__VA_ARGS__)
+#define svsublb_n_s32(...) __builtin_sve_svsublb_n_s32(__VA_ARGS__)
+#define svsublb_n_s64(...) __builtin_sve_svsublb_n_s64(__VA_ARGS__)
+#define svsublb_n_s16(...) __builtin_sve_svsublb_n_s16(__VA_ARGS__)
+#define svsublb_n_u32(...) __builtin_sve_svsublb_n_u32(__VA_ARGS__)
+#define svsublb_n_u64(...) __builtin_sve_svsublb_n_u64(__VA_ARGS__)
+#define svsublb_n_u16(...) __builtin_sve_svsublb_n_u16(__VA_ARGS__)
+#define svsublb_s32(...) __builtin_sve_svsublb_s32(__VA_ARGS__)
+#define svsublb_s64(...) __builtin_sve_svsublb_s64(__VA_ARGS__)
+#define svsublb_s16(...) __builtin_sve_svsublb_s16(__VA_ARGS__)
+#define svsublb_u32(...) __builtin_sve_svsublb_u32(__VA_ARGS__)
+#define svsublb_u64(...) __builtin_sve_svsublb_u64(__VA_ARGS__)
+#define svsublb_u16(...) __builtin_sve_svsublb_u16(__VA_ARGS__)
+#define svsublbt_n_s32(...) __builtin_sve_svsublbt_n_s32(__VA_ARGS__)
+#define svsublbt_n_s64(...) __builtin_sve_svsublbt_n_s64(__VA_ARGS__)
+#define svsublbt_n_s16(...) __builtin_sve_svsublbt_n_s16(__VA_ARGS__)
+#define svsublbt_s32(...) __builtin_sve_svsublbt_s32(__VA_ARGS__)
+#define svsublbt_s64(...) __builtin_sve_svsublbt_s64(__VA_ARGS__)
+#define svsublbt_s16(...) __builtin_sve_svsublbt_s16(__VA_ARGS__)
+#define svsublt_n_s32(...) __builtin_sve_svsublt_n_s32(__VA_ARGS__)
+#define svsublt_n_s64(...) __builtin_sve_svsublt_n_s64(__VA_ARGS__)
+#define svsublt_n_s16(...) __builtin_sve_svsublt_n_s16(__VA_ARGS__)
+#define svsublt_n_u32(...) __builtin_sve_svsublt_n_u32(__VA_ARGS__)
+#define svsublt_n_u64(...) __builtin_sve_svsublt_n_u64(__VA_ARGS__)
+#define svsublt_n_u16(...) __builtin_sve_svsublt_n_u16(__VA_ARGS__)
+#define svsublt_s32(...) __builtin_sve_svsublt_s32(__VA_ARGS__)
+#define svsublt_s64(...) __builtin_sve_svsublt_s64(__VA_ARGS__)
+#define svsublt_s16(...) __builtin_sve_svsublt_s16(__VA_ARGS__)
+#define svsublt_u32(...) __builtin_sve_svsublt_u32(__VA_ARGS__)
+#define svsublt_u64(...) __builtin_sve_svsublt_u64(__VA_ARGS__)
+#define svsublt_u16(...) __builtin_sve_svsublt_u16(__VA_ARGS__)
+#define svsubltb_n_s32(...) __builtin_sve_svsubltb_n_s32(__VA_ARGS__)
+#define svsubltb_n_s64(...) __builtin_sve_svsubltb_n_s64(__VA_ARGS__)
+#define svsubltb_n_s16(...) __builtin_sve_svsubltb_n_s16(__VA_ARGS__)
+#define svsubltb_s32(...) __builtin_sve_svsubltb_s32(__VA_ARGS__)
+#define svsubltb_s64(...) __builtin_sve_svsubltb_s64(__VA_ARGS__)
+#define svsubltb_s16(...) __builtin_sve_svsubltb_s16(__VA_ARGS__)
+#define svsubwb_n_s32(...) __builtin_sve_svsubwb_n_s32(__VA_ARGS__)
+#define svsubwb_n_s64(...) __builtin_sve_svsubwb_n_s64(__VA_ARGS__)
+#define svsubwb_n_s16(...) __builtin_sve_svsubwb_n_s16(__VA_ARGS__)
+#define svsubwb_n_u32(...) __builtin_sve_svsubwb_n_u32(__VA_ARGS__)
+#define svsubwb_n_u64(...) __builtin_sve_svsubwb_n_u64(__VA_ARGS__)
+#define svsubwb_n_u16(...) __builtin_sve_svsubwb_n_u16(__VA_ARGS__)
+#define svsubwb_s32(...) __builtin_sve_svsubwb_s32(__VA_ARGS__)
+#define svsubwb_s64(...) __builtin_sve_svsubwb_s64(__VA_ARGS__)
+#define svsubwb_s16(...) __builtin_sve_svsubwb_s16(__VA_ARGS__)
+#define svsubwb_u32(...) __builtin_sve_svsubwb_u32(__VA_ARGS__)
+#define svsubwb_u64(...) __builtin_sve_svsubwb_u64(__VA_ARGS__)
+#define svsubwb_u16(...) __builtin_sve_svsubwb_u16(__VA_ARGS__)
+#define svsubwt_n_s32(...) __builtin_sve_svsubwt_n_s32(__VA_ARGS__)
+#define svsubwt_n_s64(...) __builtin_sve_svsubwt_n_s64(__VA_ARGS__)
+#define svsubwt_n_s16(...) __builtin_sve_svsubwt_n_s16(__VA_ARGS__)
+#define svsubwt_n_u32(...) __builtin_sve_svsubwt_n_u32(__VA_ARGS__)
+#define svsubwt_n_u64(...) __builtin_sve_svsubwt_n_u64(__VA_ARGS__)
+#define svsubwt_n_u16(...) __builtin_sve_svsubwt_n_u16(__VA_ARGS__)
+#define svsubwt_s32(...) __builtin_sve_svsubwt_s32(__VA_ARGS__)
+#define svsubwt_s64(...) __builtin_sve_svsubwt_s64(__VA_ARGS__)
+#define svsubwt_s16(...) __builtin_sve_svsubwt_s16(__VA_ARGS__)
+#define svsubwt_u32(...) __builtin_sve_svsubwt_u32(__VA_ARGS__)
+#define svsubwt_u64(...) __builtin_sve_svsubwt_u64(__VA_ARGS__)
+#define svsubwt_u16(...) __builtin_sve_svsubwt_u16(__VA_ARGS__)
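+/* Naming key for the svsub* families above: "hn" keeps the high half of
+ * each double-width result and narrows, "l" widens both operands
+ * ("long"), "w" takes one already-wide operand, and the final b/t picks
+ * the even ("bottom") or odd ("top") lanes of the narrow vector(s);
+ * tails like "bt"/"tb" mix the choice per operand. One form sketched
+ * (names illustrative):
+ *
+ *   // (a[2i] - b[2i]) widened from int16_t to int32_t
+ *   svint32_t diff_even(svint16_t a, svint16_t b) {
+ *     return svsublb_s32(a, b);
+ *   }
+ */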
+#define svtbl2_u8(...) __builtin_sve_svtbl2_u8(__VA_ARGS__)
+#define svtbl2_u32(...) __builtin_sve_svtbl2_u32(__VA_ARGS__)
+#define svtbl2_u64(...) __builtin_sve_svtbl2_u64(__VA_ARGS__)
+#define svtbl2_u16(...) __builtin_sve_svtbl2_u16(__VA_ARGS__)
+#define svtbl2_s8(...) __builtin_sve_svtbl2_s8(__VA_ARGS__)
+#define svtbl2_f64(...) __builtin_sve_svtbl2_f64(__VA_ARGS__)
+#define svtbl2_f32(...) __builtin_sve_svtbl2_f32(__VA_ARGS__)
+#define svtbl2_f16(...) __builtin_sve_svtbl2_f16(__VA_ARGS__)
+#define svtbl2_s32(...) __builtin_sve_svtbl2_s32(__VA_ARGS__)
+#define svtbl2_s64(...) __builtin_sve_svtbl2_s64(__VA_ARGS__)
+#define svtbl2_s16(...) __builtin_sve_svtbl2_s16(__VA_ARGS__)
+#define svtbx_u8(...) __builtin_sve_svtbx_u8(__VA_ARGS__)
+#define svtbx_u32(...) __builtin_sve_svtbx_u32(__VA_ARGS__)
+#define svtbx_u64(...) __builtin_sve_svtbx_u64(__VA_ARGS__)
+#define svtbx_u16(...) __builtin_sve_svtbx_u16(__VA_ARGS__)
+#define svtbx_s8(...) __builtin_sve_svtbx_s8(__VA_ARGS__)
+#define svtbx_f64(...) __builtin_sve_svtbx_f64(__VA_ARGS__)
+#define svtbx_f32(...) __builtin_sve_svtbx_f32(__VA_ARGS__)
+#define svtbx_f16(...) __builtin_sve_svtbx_f16(__VA_ARGS__)
+#define svtbx_s32(...) __builtin_sve_svtbx_s32(__VA_ARGS__)
+#define svtbx_s64(...) __builtin_sve_svtbx_s64(__VA_ARGS__)
+#define svtbx_s16(...) __builtin_sve_svtbx_s16(__VA_ARGS__)
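+/* svtbl2 indexes into a pair of vectors (an svuint8x2_t etc. tuple),
+ * while svtbx leaves lanes whose index is out of range at the value of
+ * its first operand instead of zeroing them. A hedged sketch:
+ *
+ *   // out-of-range idx lanes keep their value from `fallback`
+ *   svuint8_t lut(svuint8_t fallback, svuint8_t data, svuint8_t idx) {
+ *     return svtbx_u8(fallback, data, idx);
+ *   }
+ */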
+#define svuqadd_n_s8_m(...) __builtin_sve_svuqadd_n_s8_m(__VA_ARGS__)
+#define svuqadd_n_s32_m(...) __builtin_sve_svuqadd_n_s32_m(__VA_ARGS__)
+#define svuqadd_n_s64_m(...) __builtin_sve_svuqadd_n_s64_m(__VA_ARGS__)
+#define svuqadd_n_s16_m(...) __builtin_sve_svuqadd_n_s16_m(__VA_ARGS__)
+#define svuqadd_n_s8_x(...) __builtin_sve_svuqadd_n_s8_x(__VA_ARGS__)
+#define svuqadd_n_s32_x(...) __builtin_sve_svuqadd_n_s32_x(__VA_ARGS__)
+#define svuqadd_n_s64_x(...) __builtin_sve_svuqadd_n_s64_x(__VA_ARGS__)
+#define svuqadd_n_s16_x(...) __builtin_sve_svuqadd_n_s16_x(__VA_ARGS__)
+#define svuqadd_n_s8_z(...) __builtin_sve_svuqadd_n_s8_z(__VA_ARGS__)
+#define svuqadd_n_s32_z(...) __builtin_sve_svuqadd_n_s32_z(__VA_ARGS__)
+#define svuqadd_n_s64_z(...) __builtin_sve_svuqadd_n_s64_z(__VA_ARGS__)
+#define svuqadd_n_s16_z(...) __builtin_sve_svuqadd_n_s16_z(__VA_ARGS__)
+#define svuqadd_s8_m(...) __builtin_sve_svuqadd_s8_m(__VA_ARGS__)
+#define svuqadd_s32_m(...) __builtin_sve_svuqadd_s32_m(__VA_ARGS__)
+#define svuqadd_s64_m(...) __builtin_sve_svuqadd_s64_m(__VA_ARGS__)
+#define svuqadd_s16_m(...) __builtin_sve_svuqadd_s16_m(__VA_ARGS__)
+#define svuqadd_s8_x(...) __builtin_sve_svuqadd_s8_x(__VA_ARGS__)
+#define svuqadd_s32_x(...) __builtin_sve_svuqadd_s32_x(__VA_ARGS__)
+#define svuqadd_s64_x(...) __builtin_sve_svuqadd_s64_x(__VA_ARGS__)
+#define svuqadd_s16_x(...) __builtin_sve_svuqadd_s16_x(__VA_ARGS__)
+#define svuqadd_s8_z(...) __builtin_sve_svuqadd_s8_z(__VA_ARGS__)
+#define svuqadd_s32_z(...) __builtin_sve_svuqadd_s32_z(__VA_ARGS__)
+#define svuqadd_s64_z(...) __builtin_sve_svuqadd_s64_z(__VA_ARGS__)
+#define svuqadd_s16_z(...) __builtin_sve_svuqadd_s16_z(__VA_ARGS__)
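+/* svuqadd saturating-adds an unsigned vector into a signed one; as
+ * elsewhere in the ACLE, the _m/_x/_z suffix picks the predication
+ * mode: _m merges inactive lanes from the first data operand, _z zeroes
+ * them, _x leaves them unspecified (usually cheapest). Sketch:
+ *
+ *   svint32_t f(svbool_t pg, svint32_t s, svuint32_t u) {
+ *     return svuqadd_s32_z(pg, s, u);  // inactive lanes become zero
+ *   }
+ */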
+#define svwhilege_b8_s32(...) __builtin_sve_svwhilege_b8_s32(__VA_ARGS__)
+#define svwhilege_b32_s32(...) __builtin_sve_svwhilege_b32_s32(__VA_ARGS__)
+#define svwhilege_b64_s32(...) __builtin_sve_svwhilege_b64_s32(__VA_ARGS__)
+#define svwhilege_b16_s32(...) __builtin_sve_svwhilege_b16_s32(__VA_ARGS__)
+#define svwhilege_b8_s64(...) __builtin_sve_svwhilege_b8_s64(__VA_ARGS__)
+#define svwhilege_b32_s64(...) __builtin_sve_svwhilege_b32_s64(__VA_ARGS__)
+#define svwhilege_b64_s64(...) __builtin_sve_svwhilege_b64_s64(__VA_ARGS__)
+#define svwhilege_b16_s64(...) __builtin_sve_svwhilege_b16_s64(__VA_ARGS__)
+#define svwhilege_b8_u32(...) __builtin_sve_svwhilege_b8_u32(__VA_ARGS__)
+#define svwhilege_b32_u32(...) __builtin_sve_svwhilege_b32_u32(__VA_ARGS__)
+#define svwhilege_b64_u32(...) __builtin_sve_svwhilege_b64_u32(__VA_ARGS__)
+#define svwhilege_b16_u32(...) __builtin_sve_svwhilege_b16_u32(__VA_ARGS__)
+#define svwhilege_b8_u64(...) __builtin_sve_svwhilege_b8_u64(__VA_ARGS__)
+#define svwhilege_b32_u64(...) __builtin_sve_svwhilege_b32_u64(__VA_ARGS__)
+#define svwhilege_b64_u64(...) __builtin_sve_svwhilege_b64_u64(__VA_ARGS__)
+#define svwhilege_b16_u64(...) __builtin_sve_svwhilege_b16_u64(__VA_ARGS__)
+#define svwhilegt_b8_s32(...) __builtin_sve_svwhilegt_b8_s32(__VA_ARGS__)
+#define svwhilegt_b32_s32(...) __builtin_sve_svwhilegt_b32_s32(__VA_ARGS__)
+#define svwhilegt_b64_s32(...) __builtin_sve_svwhilegt_b64_s32(__VA_ARGS__)
+#define svwhilegt_b16_s32(...) __builtin_sve_svwhilegt_b16_s32(__VA_ARGS__)
+#define svwhilegt_b8_s64(...) __builtin_sve_svwhilegt_b8_s64(__VA_ARGS__)
+#define svwhilegt_b32_s64(...) __builtin_sve_svwhilegt_b32_s64(__VA_ARGS__)
+#define svwhilegt_b64_s64(...) __builtin_sve_svwhilegt_b64_s64(__VA_ARGS__)
+#define svwhilegt_b16_s64(...) __builtin_sve_svwhilegt_b16_s64(__VA_ARGS__)
+#define svwhilegt_b8_u32(...) __builtin_sve_svwhilegt_b8_u32(__VA_ARGS__)
+#define svwhilegt_b32_u32(...) __builtin_sve_svwhilegt_b32_u32(__VA_ARGS__)
+#define svwhilegt_b64_u32(...) __builtin_sve_svwhilegt_b64_u32(__VA_ARGS__)
+#define svwhilegt_b16_u32(...) __builtin_sve_svwhilegt_b16_u32(__VA_ARGS__)
+#define svwhilegt_b8_u64(...) __builtin_sve_svwhilegt_b8_u64(__VA_ARGS__)
+#define svwhilegt_b32_u64(...) __builtin_sve_svwhilegt_b32_u64(__VA_ARGS__)
+#define svwhilegt_b64_u64(...) __builtin_sve_svwhilegt_b64_u64(__VA_ARGS__)
+#define svwhilegt_b16_u64(...) __builtin_sve_svwhilegt_b16_u64(__VA_ARGS__)
+#define svwhilerw_u8(...) __builtin_sve_svwhilerw_u8(__VA_ARGS__)
+#define svwhilerw_s8(...) __builtin_sve_svwhilerw_s8(__VA_ARGS__)
+#define svwhilerw_u64(...) __builtin_sve_svwhilerw_u64(__VA_ARGS__)
+#define svwhilerw_f64(...) __builtin_sve_svwhilerw_f64(__VA_ARGS__)
+#define svwhilerw_s64(...) __builtin_sve_svwhilerw_s64(__VA_ARGS__)
+#define svwhilerw_u16(...) __builtin_sve_svwhilerw_u16(__VA_ARGS__)
+#define svwhilerw_f16(...) __builtin_sve_svwhilerw_f16(__VA_ARGS__)
+#define svwhilerw_s16(...) __builtin_sve_svwhilerw_s16(__VA_ARGS__)
+#define svwhilerw_u32(...) __builtin_sve_svwhilerw_u32(__VA_ARGS__)
+#define svwhilerw_f32(...) __builtin_sve_svwhilerw_f32(__VA_ARGS__)
+#define svwhilerw_s32(...) __builtin_sve_svwhilerw_s32(__VA_ARGS__)
+#define svwhilewr_u8(...) __builtin_sve_svwhilewr_u8(__VA_ARGS__)
+#define svwhilewr_s8(...) __builtin_sve_svwhilewr_s8(__VA_ARGS__)
+#define svwhilewr_u64(...) __builtin_sve_svwhilewr_u64(__VA_ARGS__)
+#define svwhilewr_f64(...) __builtin_sve_svwhilewr_f64(__VA_ARGS__)
+#define svwhilewr_s64(...) __builtin_sve_svwhilewr_s64(__VA_ARGS__)
+#define svwhilewr_u16(...) __builtin_sve_svwhilewr_u16(__VA_ARGS__)
+#define svwhilewr_f16(...) __builtin_sve_svwhilewr_f16(__VA_ARGS__)
+#define svwhilewr_s16(...) __builtin_sve_svwhilewr_s16(__VA_ARGS__)
+#define svwhilewr_u32(...) __builtin_sve_svwhilewr_u32(__VA_ARGS__)
+#define svwhilewr_f32(...) __builtin_sve_svwhilewr_f32(__VA_ARGS__)
+#define svwhilewr_s32(...) __builtin_sve_svwhilewr_s32(__VA_ARGS__)
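+/* svwhilege/svwhilegt form loop predicates from a downward-counting
+ * scalar range, and svwhilerw/svwhilewr form one that is true for the
+ * leading lanes free of a read-after-write (resp. write-after-read)
+ * hazard between two pointers -- the SVE2 tool for vectorizing loops
+ * over possibly overlapping arrays. Sketch (pointers illustrative):
+ *
+ *   svbool_t safe = svwhilewr_s32(src, dst);  // lanes safe to process
+ */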
+#define svxar_n_u8(...) __builtin_sve_svxar_n_u8(__VA_ARGS__)
+#define svxar_n_u32(...) __builtin_sve_svxar_n_u32(__VA_ARGS__)
+#define svxar_n_u64(...) __builtin_sve_svxar_n_u64(__VA_ARGS__)
+#define svxar_n_u16(...) __builtin_sve_svxar_n_u16(__VA_ARGS__)
+#define svxar_n_s8(...) __builtin_sve_svxar_n_s8(__VA_ARGS__)
+#define svxar_n_s32(...) __builtin_sve_svxar_n_s32(__VA_ARGS__)
+#define svxar_n_s64(...) __builtin_sve_svxar_n_s64(__VA_ARGS__)
+#define svxar_n_s16(...) __builtin_sve_svxar_n_s16(__VA_ARGS__)
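+/* Everything above forwards an explicitly-typed intrinsic straight to
+ * its clang builtin; the variadic #define avoids restating a prototype
+ * for every non-overloaded spelling. For instance, svxar is SVE2's XAR
+ * (exclusive-OR, then rotate right by an immediate in [1, element
+ * bits]); a sketch with illustrative names:
+ *
+ *   // one vector's worth of (a ^ b) rotated right by 8
+ *   svuint64_t h(svuint64_t a, svuint64_t b) {
+ *     return svxar_n_u64(a, b, 8);
+ *   }
+ */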
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
+svint8_t svaba(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
+svint32_t svaba(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
+svint64_t svaba(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
+svint16_t svaba(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
+svuint8_t svaba(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
+svuint32_t svaba(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
+svuint64_t svaba(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
+svuint16_t svaba(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
+svint8_t svaba(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
+svint32_t svaba(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
+svint64_t svaba(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
+svint16_t svaba(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
+svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
+svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
+svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
+svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t);
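+/* From here on the header declares the overloaded spellings: __aio is
+ * defined earlier in this file as a static, always_inline,
+ * __overloadable__ function specifier, and __clang_arm_builtin_alias
+ * resolves each overload to the builtin named in the attribute, so
+ *
+ *   svint32_t r = svaba(acc, x, y);  // picks svaba_s32 by argument type
+ *
+ * compiles to the same code as the suffixed form, with no call
+ * overhead. */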
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
+svint32_t svabalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
+svint64_t svabalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
+svint16_t svabalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
+svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
+svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
+svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
+svint32_t svabalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
+svint64_t svabalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
+svint16_t svabalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
+svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
+svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
+svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
+svint32_t svabalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
+svint64_t svabalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
+svint16_t svabalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
+svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
+svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
+svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
+svint32_t svabalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
+svint64_t svabalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
+svint16_t svabalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
+svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
+svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
+svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
+svint32_t svabdlb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
+svint64_t svabdlb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
+svint16_t svabdlb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
+svuint32_t svabdlb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
+svuint64_t svabdlb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
+svuint16_t svabdlb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
+svint32_t svabdlb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
+svint64_t svabdlb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
+svint16_t svabdlb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
+svuint32_t svabdlb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
+svuint64_t svabdlb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
+svuint16_t svabdlb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
+svint32_t svabdlt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
+svint64_t svabdlt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
+svint16_t svabdlt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
+svuint32_t svabdlt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
+svuint64_t svabdlt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
+svuint16_t svabdlt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
+svint32_t svabdlt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
+svint64_t svabdlt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
+svint16_t svabdlt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
+svuint32_t svabdlt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
+svuint64_t svabdlt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
+svuint16_t svabdlt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
+svint32_t svadalp_m(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
+svint64_t svadalp_m(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
+svint16_t svadalp_m(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
+svint32_t svadalp_x(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
+svint64_t svadalp_x(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
+svint16_t svadalp_x(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
+svint32_t svadalp_z(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
+svint64_t svadalp_z(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
+svint16_t svadalp_z(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
+svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
+svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
+svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
+svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
+svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
+svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
+svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))
+svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))
+svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))
+svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))
+svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))
+svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))
+svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))
+svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))
+svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))
+svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))
+svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))
+svuint16_t svaddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))
+svuint32_t svaddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))
+svuint8_t svaddhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))
+svint16_t svaddhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))
+svint32_t svaddhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))
+svint8_t svaddhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))
+svuint16_t svaddhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))
+svuint32_t svaddhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))
+svuint8_t svaddhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))
+svint16_t svaddhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))
+svint32_t svaddhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))
+svint8_t svaddhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))
+svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))
+svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))
+svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))
+svint16_t svaddhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))
+svint32_t svaddhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))
+svint8_t svaddhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))
+svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))
+svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))
+svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))
+svint16_t svaddhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))
+svint32_t svaddhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))
+svint8_t svaddhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))
+svint32_t svaddlb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))
+svint64_t svaddlb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))
+svint16_t svaddlb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))
+svuint32_t svaddlb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))
+svuint64_t svaddlb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))
+svuint16_t svaddlb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))
+svint32_t svaddlb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))
+svint64_t svaddlb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))
+svint16_t svaddlb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))
+svuint32_t svaddlb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))
+svuint64_t svaddlb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))
+svuint16_t svaddlb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))
+svint32_t svaddlbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))
+svint64_t svaddlbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))
+svint16_t svaddlbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))
+svint32_t svaddlbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))
+svint64_t svaddlbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))
+svint16_t svaddlbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))
+svint32_t svaddlt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))
+svint64_t svaddlt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))
+svint16_t svaddlt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))
+svuint32_t svaddlt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))
+svuint64_t svaddlt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))
+svuint16_t svaddlt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))
+svint32_t svaddlt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))
+svint64_t svaddlt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))
+svint16_t svaddlt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))
+svuint32_t svaddlt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))
+svuint64_t svaddlt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))
+svuint16_t svaddlt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))
+svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))
+svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))
+svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))
+svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))
+svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))
+svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))
+svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))
+svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))
+svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))
+svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))
+svint8_t svaddp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))
+svint32_t svaddp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))
+svint64_t svaddp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))
+svint16_t svaddp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))
+svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))
+svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))
+svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))
+svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))
+svint8_t svaddp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))
+svint32_t svaddp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))
+svint64_t svaddp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))
+svint16_t svaddp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))
+svint32_t svaddwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))
+svint64_t svaddwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))
+svint16_t svaddwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))
+svuint32_t svaddwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))
+svuint64_t svaddwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))
+svuint16_t svaddwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))
+svint32_t svaddwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))
+svint64_t svaddwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))
+svint16_t svaddwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))
+svuint32_t svaddwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))
+svuint64_t svaddwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))
+svuint16_t svaddwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))
+svint32_t svaddwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))
+svint64_t svaddwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))
+svint16_t svaddwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))
+svuint32_t svaddwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))
+svuint64_t svaddwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))
+svuint16_t svaddwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))
+svint32_t svaddwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))
+svint64_t svaddwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))
+svint16_t svaddwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))
+svuint32_t svaddwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))
+svuint64_t svaddwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))
+svuint16_t svaddwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))
+svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))
+svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))
+svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))
+svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))
+svint8_t svbcax(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))
+svint32_t svbcax(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))
+svint64_t svbcax(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))
+svint16_t svbcax(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))
+svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))
+svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))
+svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))
+svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))
+svint8_t svbcax(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))
+svint32_t svbcax(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))
+svint64_t svbcax(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))
+svint16_t svbcax(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))
+svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))
+svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))
+svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))
+svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))
+svint8_t svbsl1n(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))
+svint32_t svbsl1n(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))
+svint64_t svbsl1n(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))
+svint16_t svbsl1n(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))
+svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))
+svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))
+svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))
+svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))
+svint8_t svbsl1n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))
+svint32_t svbsl1n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))
+svint64_t svbsl1n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))
+svint16_t svbsl1n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
+svint8_t svbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
+svint32_t svbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
+svint64_t svbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
+svint16_t svbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
+svint8_t svbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
+svint32_t svbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
+svint64_t svbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
+svint16_t svbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
+svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
+svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
+svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
+svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
+svint8_t svcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
+svint32_t svcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
+svint64_t svcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
+svint16_t svcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
+svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
+svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
+svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
+svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
+svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
+svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
+svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
+svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
+svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
+svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
+svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
+svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
+svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
+svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
+svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
+svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
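+/* The trailing uint64_t on svcadd/svcmla/svcdot is a rotation in
+ * degrees, not a lane count: svcadd accepts 90 or 270, svcmla and
+ * svcdot accept 0, 90, 180 or 270, and the _lane variants take a lane
+ * index before it. Sketch:
+ *
+ *   svint32_t cma(svint32_t acc, svint32_t a, svint32_t b) {
+ *     return svcmla(acc, a, b, 90);  // complex multiply-add, rot 90
+ *   }
+ */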
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
+svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
+svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
+svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
+svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
+svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
+svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
+svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
+svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
+svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
+svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
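+/* Among the conversions above, svcvtlt widens the odd ("top") lanes,
+ * svcvtnt narrows into the odd lanes while keeping the even lanes of
+ * its first operand, and svcvtx/svcvtxnt are the round-to-odd f64->f32
+ * forms used to avoid double rounding. Sketch:
+ *
+ *   svfloat32_t widen_top(svfloat16_t h) {
+ *     return svcvtlt_f32_x(svptrue_b32(), h);
+ *   }
+ */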
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
+svint8_t sveor3(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
+svint32_t sveor3(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
+svint64_t sveor3(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
+svint16_t sveor3(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
+svint8_t sveor3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
+svint32_t sveor3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
+svint64_t sveor3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
+svint16_t sveor3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
+svint8_t sveortb(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
+svint32_t sveortb(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
+svint64_t sveortb(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
+svint16_t sveortb(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
+svint8_t sveortb(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
+svint32_t sveortb(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
+svint64_t sveortb(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
+svint16_t sveortb(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
+svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
+svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
+svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
+svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
+svuint8_t svhistseg(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
+svuint8_t svhistseg(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
+svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
+svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
+svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
+svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
+svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
+svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
+svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
+svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
+svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
+svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
+svint8_t svhsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
+svint32_t svhsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
+svint64_t svhsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
+svint16_t svhsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
+svint8_t svhsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
+svint32_t svhsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
+svint64_t svhsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
+svint16_t svhsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
+svint8_t svhsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
+svint32_t svhsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
+svint64_t svhsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
+svint16_t svhsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
+svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
+svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
+svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
+svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
+svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
+svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
+svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
+svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
+svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
+svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
+svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
+svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
+svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
+svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
+svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
+svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
+svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
+svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
+svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
+svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
+svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
+svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
+svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
+svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
+svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
+svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
+svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
+svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
+svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
+svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
+svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
+svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
+svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
+svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
+svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
+svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
+svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
+svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
+svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
+svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
+svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
+svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
+svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
+svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
+svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
+svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
+svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
+svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
+svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
+svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
+svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
+svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
+svint32_t svldnt1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
+svint64_t svldnt1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
+svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
+svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
+svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
+svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
+svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
+svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
+svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
+svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
+svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
+svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
+svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
+svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
+svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
+svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
+svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
+svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
+svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
+svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
+svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
+svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
+svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
+svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
+svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
+svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
+svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
+svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
+svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
+svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
+svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
+svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
+svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
+svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
+svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
+svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
+svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
+svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
+svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
+svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
+svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
+svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
+svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
+svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
+svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
+svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
+svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
+svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
+svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
+svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
+svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
+svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
+svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
+svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
+svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
+svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
+svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
+svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
+svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
+svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
+svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
+svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
+svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
+svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
+svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
+svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
+svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
+svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
+svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
+svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
+svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
+svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
+svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
+svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
+svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
+svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
+svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
+svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
+svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
+svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
+svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
+svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
+svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
+svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
+svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
+svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
+svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
+svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
+svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
+svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
+svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
+svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
+svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
+svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
+svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
+svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
+svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
+svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
+svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
+svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
+svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
+svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
+svint64_t svlogb_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
+svint32_t svlogb_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
+svint16_t svlogb_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
+svint64_t svlogb_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
+svint32_t svlogb_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
+svint16_t svlogb_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
+svbool_t svmatch(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
+svbool_t svmatch(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
+svbool_t svmatch(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
+svbool_t svmatch(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
+svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
+svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
+svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
+svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
+svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
+svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
+svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
+svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
+svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
+svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
+svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
+svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
+svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
+svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
+svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
+svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
+svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
+svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
+svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
+svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
+svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
+svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
+svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
+svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
+svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
+svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
+svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
+svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
+svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
+svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
+svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
+svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
+svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
+svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
+svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
+svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
+svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
+svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
+svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
+svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
+svint8_t svminp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
+svint32_t svminp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
+svint64_t svminp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
+svint16_t svminp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
+svint8_t svminp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
+svint32_t svminp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
+svint64_t svminp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
+svint16_t svminp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
+svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
+svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
+svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
+svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
+svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
+svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
+svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
+svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
+svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
+svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
+svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
+svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
+svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
+svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
+svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
+svint32_t svmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
+svint64_t svmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
+svint16_t svmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
+svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
+svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
+svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
+svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
+svint32_t svmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
+svint64_t svmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
+svint16_t svmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
+svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
+svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
+svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
+svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
+svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
+svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
+svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
+svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
+svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
+svint32_t svmlalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
+svint64_t svmlalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
+svint16_t svmlalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
+svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
+svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
+svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
+svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
+svint32_t svmlalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
+svint64_t svmlalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
+svint16_t svmlalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
+svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
+svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
+svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
+svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
+svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
+svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
+svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
+svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
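+/* svmlalb/svmlalt (and svmlslb/svmlslt below) are widening multiply-
+ * accumulates: svmlalb(acc, a, b) multiplies the even-numbered ("bottom")
+ * elements of a and b, widens each product to the element type of acc, and
+ * adds it in; svmlalt does the same with the odd-numbered ("top") elements,
+ * and the svmlsl* forms subtract instead of add.  The _lane variants take a
+ * constant index selecting one element of the last vector operand, and the
+ * _n_ variants splat a scalar across all lanes. */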
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
+svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
+svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
+svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
+svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
+svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
+svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
+svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
+svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
+svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
+svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
+svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
+svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
+svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
+svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
+svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
+svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
+svint32_t svmovlb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
+svint64_t svmovlb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
+svint16_t svmovlb(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
+svuint32_t svmovlb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
+svuint64_t svmovlb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
+svuint16_t svmovlb(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
+svint32_t svmovlt(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
+svint64_t svmovlt(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
+svint16_t svmovlt(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
+svuint32_t svmovlt(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
+svuint64_t svmovlt(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
+svuint16_t svmovlt(svuint8_t);
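+/* svmovlb/svmovlt lengthen without arithmetic: they sign-extend (signed
+ * forms) or zero-extend (unsigned forms) the even-numbered (bottom) or
+ * odd-numbered (top) elements of the source into a vector of twice the
+ * element width. */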
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
+svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
+svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
+svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
+svint32_t svmul_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
+svint64_t svmul_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
+svint16_t svmul_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
+svint32_t svmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
+svint64_t svmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
+svint16_t svmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
+svuint32_t svmullb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
+svuint64_t svmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
+svuint16_t svmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
+svint32_t svmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
+svint64_t svmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
+svint16_t svmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
+svuint32_t svmullb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
+svuint64_t svmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
+svuint16_t svmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
+svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
+svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
+svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
+svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
+svint32_t svmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
+svint64_t svmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
+svint16_t svmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
+svuint32_t svmullt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
+svuint64_t svmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
+svuint16_t svmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
+svint32_t svmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
+svint64_t svmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
+svint16_t svmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
+svuint32_t svmullt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
+svuint64_t svmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
+svuint16_t svmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
+svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
+svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
+svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
+svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
+svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
+svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
+svbool_t svnmatch(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
+svbool_t svnmatch(svbool_t, svint16_t, svint16_t);
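+/* svnbsl above is bitwise select with an inverted result (the SVE2 NBSL
+ * instruction): bits of the first operand are chosen where the selector
+ * (third operand) is set, bits of the second elsewhere, and the combined
+ * value is bitwise complemented.  svnmatch sets a predicate lane when the
+ * corresponding active element of the second operand matches none of the
+ * elements in its 128-bit segment of the third operand. */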
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
+svuint8_t svpmul(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
+svuint8_t svpmul(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
+svuint64_t svpmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
+svuint16_t svpmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
+svuint64_t svpmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
+svuint16_t svpmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
+svuint8_t svpmullb_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
+svuint32_t svpmullb_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
+svuint8_t svpmullb_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
+svuint32_t svpmullb_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
+svuint64_t svpmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
+svuint16_t svpmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
+svuint64_t svpmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
+svuint16_t svpmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
+svuint8_t svpmullt_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
+svuint32_t svpmullt_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
+svuint8_t svpmullt_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
+svuint32_t svpmullt_pair(svuint32_t, svuint32_t);
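+/* The svpmul* family performs polynomial (carry-less) multiplication over
+ * GF(2).  svpmullb/svpmullt widen the products of the bottom/top elements;
+ * the _pair forms compute the same double-width products but return each
+ * one as a pair of adjacent elements of the original width. */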
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
+svint8_t svqabs_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
+svint32_t svqabs_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
+svint64_t svqabs_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
+svint16_t svqabs_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
+svint8_t svqabs_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
+svint32_t svqabs_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
+svint64_t svqabs_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
+svint16_t svqabs_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
+svint8_t svqabs_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
+svint32_t svqabs_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
+svint64_t svqabs_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
+svint16_t svqabs_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t);
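+/* svqadd is saturating addition.  The predication suffixes follow the
+ * usual SVE ACLE convention: _m merges, so inactive lanes keep the value
+ * of the first vector operand; _z zeroes inactive lanes; _x leaves
+ * inactive lanes unspecified, which lets the compiler pick the cheapest
+ * encoding.  For example,
+ *   svint32_t r = svqadd_z(pg, a, b);
+ * saturates a + b in active lanes and writes zero elsewhere. */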
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
+svint8_t svqcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
+svint32_t svqcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
+svint64_t svqcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
+svint16_t svqcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
+svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
+svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))
+svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))
+svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))
+svint32_t svqdmlalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))
+svint64_t svqdmlalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))
+svint16_t svqdmlalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))
+svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))
+svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))
+svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))
+svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))
+svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))
+svint32_t svqdmlslb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))
+svint64_t svqdmlslb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))
+svint16_t svqdmlslb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))
+svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))
+svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))
+svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))
+svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))
+svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))
+svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))
+svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))
+svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))
+svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))
+svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))
+svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))
+svint32_t svqdmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))
+svint64_t svqdmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))
+svint16_t svqdmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))
+svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))
+svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))
+svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))
+svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))
+svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))
+svint8_t svqdmulh(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))
+svint32_t svqdmulh(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))
+svint64_t svqdmulh(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))
+svint16_t svqdmulh(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))
+svint8_t svqdmulh(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))
+svint32_t svqdmulh(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))
+svint64_t svqdmulh(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))
+svint16_t svqdmulh(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))
+svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))
+svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))
+svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))
+svint32_t svqdmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))
+svint64_t svqdmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))
+svint16_t svqdmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))
+svint32_t svqdmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))
+svint64_t svqdmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))
+svint16_t svqdmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))
+svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))
+svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))
+svint32_t svqdmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))
+svint64_t svqdmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))
+svint16_t svqdmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))
+svint32_t svqdmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))
+svint64_t svqdmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))
+svint16_t svqdmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))
+svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))
+svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t);
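+/* svqdmulh is saturating doubling multiply returning the high half, i.e.
+ * sat(2 * a * b) with only the upper element-width bits kept;
+ * svqdmullb/svqdmullt are its widening bottom/top counterparts.  All
+ * variants are signed, matching the fixed-point Q-format arithmetic these
+ * instructions implement. */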
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))
+svint8_t svqneg_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))
+svint32_t svqneg_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))
+svint64_t svqneg_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))
+svint16_t svqneg_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))
+svint8_t svqneg_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))
+svint32_t svqneg_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))
+svint64_t svqneg_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))
+svint16_t svqneg_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))
+svint8_t svqneg_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))
+svint32_t svqneg_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))
+svint64_t svqneg_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))
+svint16_t svqneg_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))
+svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))
+svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))
+svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))
+svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))
+svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))
+svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
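+/* svqcadd (above) and svqrdcmlah treat consecutive even/odd lanes as the
+ * real and imaginary parts of complex numbers: the trailing constant is
+ * the rotation in degrees (90 or 270 for svqcadd; 0, 90, 180 or 270 for
+ * svqrdcmlah), and in the _lane forms the index counts complex-number
+ * pairs rather than scalar elements. */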
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))
+svint8_t svqrdmlah(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))
+svint32_t svqrdmlah(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))
+svint64_t svqrdmlah(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))
+svint16_t svqrdmlah(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))
+svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))
+svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))
+svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))
+svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))
+svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))
+svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))
+svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))
+svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))
+svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))
+svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))
+svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))
+svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))
+svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))
+svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))
+svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))
+svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))
+svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))
+svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))
+svint8_t svqrdmulh(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))
+svint32_t svqrdmulh(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))
+svint64_t svqrdmulh(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))
+svint16_t svqrdmulh(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))
+svint8_t svqrdmulh(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))
+svint32_t svqrdmulh(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))
+svint64_t svqrdmulh(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))
+svint16_t svqrdmulh(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))
+svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))
+svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))
+svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))
+svint8_t svqrshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))
+svint32_t svqrshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))
+svint64_t svqrshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))
+svint16_t svqrshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))
+svint8_t svqrshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))
+svint32_t svqrshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))
+svint64_t svqrshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))
+svint16_t svqrshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))
+svint8_t svqrshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))
+svint32_t svqrshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))
+svint64_t svqrshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))
+svint16_t svqrshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))
+svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))
+svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))
+svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))
+svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))
+svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))
+svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))
+svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))
+svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))
+svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))
+svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))
+svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))
+svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))
+svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))
+svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))
+svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))
+svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))
+svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))
+svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))
+svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))
+svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))
+svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))
+svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))
+svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))
+svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))
+svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))
+svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))
+svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))
+svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))
+svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))
+svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))
+svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
+svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
+svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
+svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
+svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
+svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t);
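+/* svqrshl is a saturating rounding shift by signed, per-element amounts:
+ * positive amounts shift left with saturation, negative amounts shift
+ * right with rounding.  Note that even the unsigned-element overloads take
+ * a signed shift vector, as the declarations above show. */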
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
+svint16_t svqrshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
+svint32_t svqrshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
+svint8_t svqrshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
+svuint16_t svqrshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
+svuint32_t svqrshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
+svuint8_t svqrshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
+svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
+svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
+svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
+svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
+svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
+svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
+svuint16_t svqrshrunb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
+svuint32_t svqrshrunb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
+svuint8_t svqrshrunb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
+svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
+svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
+svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t);
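+/* svqrshrnb/svqrshrnt are saturating rounding shift-right-narrow
+ * operations: the immediate is the shift amount, the result has half the
+ * element width, and the b/t suffix places the narrowed values in the
+ * even or odd lanes (the nt forms merge into the vector passed as the
+ * first operand).  The svqrshrun* variants additionally convert signed
+ * inputs to unsigned results with saturation. */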
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
+svint8_t svqshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
+svint32_t svqshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
+svint64_t svqshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
+svint16_t svqshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
+svint8_t svqshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
+svint32_t svqshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
+svint64_t svqshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
+svint16_t svqshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
+svint8_t svqshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
+svint32_t svqshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
+svint64_t svqshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
+svint16_t svqshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
+svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
+svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
+svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
+svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
+svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
+svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
+svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
+svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
+svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
+svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
+svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
+svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
+svint8_t svqshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
+svint32_t svqshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
+svint64_t svqshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
+svint16_t svqshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
+svint8_t svqshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
+svint32_t svqshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
+svint64_t svqshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
+svint16_t svqshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
+svint8_t svqshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
+svint32_t svqshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
+svint64_t svqshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
+svint16_t svqshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
+svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
+svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
+svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
+svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
+svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
+svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
+svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
+svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
+svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
+svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
+svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
+svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
+svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
+svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
+svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
+svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
+svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
+svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
+svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
+svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
+svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
+svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
+svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
+svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
+svint16_t svqshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
+svint32_t svqshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
+svint8_t svqshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
+svuint16_t svqshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
+svuint32_t svqshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
+svuint8_t svqshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
+svint16_t svqshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
+svint32_t svqshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
+svint8_t svqshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
+svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
+svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
+svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
+svuint16_t svqshrunb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
+svuint32_t svqshrunb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
+svuint8_t svqshrunb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
+svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
+svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
+svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
+svint8_t svqsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
+svint32_t svqsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
+svint64_t svqsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
+svint16_t svqsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
+svint8_t svqsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
+svint32_t svqsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
+svint64_t svqsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
+svint16_t svqsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
+svint8_t svqsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
+svint32_t svqsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
+svint64_t svqsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
+svint16_t svqsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
+svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
+svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
+svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
+svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
+svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
+svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
+svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
+svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
+svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
+svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
+svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
+svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
+svint8_t svqsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
+svint32_t svqsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
+svint64_t svqsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
+svint16_t svqsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
+svint8_t svqsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
+svint32_t svqsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
+svint64_t svqsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
+svint16_t svqsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
+svint8_t svqsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
+svint32_t svqsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
+svint64_t svqsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
+svint16_t svqsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
+svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
+svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
+svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
+svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
+svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
+svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
+svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
+svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
+svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
+svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
+svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
+svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
+svint8_t svqsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
+svint32_t svqsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
+svint64_t svqsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
+svint16_t svqsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
+svint8_t svqsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
+svint32_t svqsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
+svint64_t svqsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
+svint16_t svqsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
+svint8_t svqsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
+svint32_t svqsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
+svint64_t svqsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
+svint16_t svqsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
+svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
+svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
+svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
+svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
+svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
+svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
+svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
+svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
+svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
+svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
+svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
+svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
+svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
+svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
+svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
+svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
+svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
+svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
+svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
+svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
+svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
+svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
+svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
+svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
+svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
+svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
+svint16_t svqxtnb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
+svint32_t svqxtnb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
+svint8_t svqxtnb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
+svuint16_t svqxtnb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
+svuint32_t svqxtnb(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
+svuint8_t svqxtnb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
+svint16_t svqxtnt(svint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
+svint32_t svqxtnt(svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
+svint8_t svqxtnt(svint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
+svuint16_t svqxtnt(svuint16_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
+svuint32_t svqxtnt(svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
+svuint8_t svqxtnt(svuint8_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
+svuint16_t svqxtunb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
+svuint32_t svqxtunb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
+svuint8_t svqxtunb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
+svuint16_t svqxtunt(svuint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
+svuint32_t svqxtunt(svuint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
+svuint8_t svqxtunt(svuint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
+svuint16_t svraddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
+svuint32_t svraddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
+svuint8_t svraddhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
+svint16_t svraddhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
+svint32_t svraddhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
+svint8_t svraddhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
+svuint16_t svraddhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
+svuint32_t svraddhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
+svuint8_t svraddhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
+svint16_t svraddhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
+svint32_t svraddhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
+svint8_t svraddhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
+svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
+svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
+svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
+svint16_t svraddhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
+svint32_t svraddhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
+svint8_t svraddhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
+svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
+svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
+svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
+svint16_t svraddhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
+svint32_t svraddhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
+svint8_t svraddhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
+svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
+svuint32_t svrecpe_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
+svuint32_t svrecpe_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
+svint8_t svrhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
+svint32_t svrhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
+svint64_t svrhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
+svint16_t svrhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
+svint8_t svrhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
+svint32_t svrhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
+svint64_t svrhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
+svint16_t svrhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
+svint8_t svrhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
+svint32_t svrhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
+svint64_t svrhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
+svint16_t svrhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
+svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
+svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
+svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
+svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
+svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
+svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
+svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
+svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
+svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
+svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
+svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
+svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
+svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
+svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
+svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
+svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
+svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
+svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
+svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
+svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
+svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
+svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
+svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
+svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
+svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
+svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
+svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
+svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
+svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
+svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
+svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
+svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
+svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
+svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
+svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
+svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
+svint8_t svrshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
+svint32_t svrshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
+svint64_t svrshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
+svint16_t svrshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
+svint8_t svrshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
+svint32_t svrshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
+svint64_t svrshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
+svint16_t svrshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
+svint8_t svrshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
+svint32_t svrshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
+svint64_t svrshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
+svint16_t svrshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
+svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
+svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
+svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
+svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
+svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
+svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
+svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
+svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
+svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
+svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
+svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
+svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
+svint8_t svrshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
+svint32_t svrshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
+svint64_t svrshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
+svint16_t svrshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
+svint8_t svrshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
+svint32_t svrshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
+svint64_t svrshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
+svint16_t svrshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
+svint8_t svrshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
+svint32_t svrshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
+svint64_t svrshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
+svint16_t svrshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
+svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
+svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
+svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
+svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
+svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
+svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
+svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
+svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
+svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
+svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
+svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
+svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
+svint8_t svrshr_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
+svint32_t svrshr_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
+svint64_t svrshr_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
+svint16_t svrshr_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
+svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
+svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
+svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
+svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
+svint8_t svrshr_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
+svint32_t svrshr_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
+svint64_t svrshr_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
+svint16_t svrshr_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
+svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
+svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
+svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
+svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
+svint8_t svrshr_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
+svint32_t svrshr_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
+svint64_t svrshr_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
+svint16_t svrshr_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
+svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
+svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
+svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
+svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
+svuint16_t svrshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
+svuint32_t svrshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
+svuint8_t svrshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
+svint16_t svrshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
+svint32_t svrshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
+svint8_t svrshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
+svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
+svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
+svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
+svint16_t svrshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
+svint32_t svrshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
+svint8_t svrshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
+svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
+svuint32_t svrsqrte_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
+svuint32_t svrsqrte_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
+svint8_t svrsra(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
+svint32_t svrsra(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
+svint64_t svrsra(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
+svint16_t svrsra(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
+svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
+svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
+svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
+svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
+svuint16_t svrsubhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
+svuint32_t svrsubhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
+svuint8_t svrsubhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
+svint16_t svrsubhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
+svint32_t svrsubhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
+svint8_t svrsubhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
+svuint16_t svrsubhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
+svuint32_t svrsubhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
+svuint8_t svrsubhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
+svint16_t svrsubhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
+svint32_t svrsubhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
+svint8_t svrsubhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
+svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
+svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
+svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
+svint16_t svrsubhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
+svint32_t svrsubhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
+svint8_t svrsubhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
+svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
+svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
+svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
+svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
+svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
+svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
+svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
+svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
+svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
+svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
+svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
+svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
+svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
+svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
+svint32_t svshllb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
+svint64_t svshllb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
+svint16_t svshllb(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
+svuint32_t svshllb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
+svuint64_t svshllb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
+svuint16_t svshllb(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
+svint32_t svshllt(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
+svint64_t svshllt(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
+svint16_t svshllt(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
+svuint32_t svshllt(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
+svuint64_t svshllt(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
+svuint16_t svshllt(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
+svuint16_t svshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
+svuint32_t svshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
+svuint8_t svshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
+svint16_t svshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
+svint32_t svshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
+svint8_t svshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
+svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
+svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
+svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
+svint16_t svshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
+svint32_t svshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
+svint8_t svshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
+svuint8_t svsli(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
+svuint32_t svsli(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
+svuint64_t svsli(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
+svuint16_t svsli(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
+svint8_t svsli(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
+svint32_t svsli(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
+svint64_t svsli(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
+svint16_t svsli(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
+svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
+svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
+svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
+svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
+svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
+svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
+svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
+svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
+svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
+svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
+svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
+svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
+svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
+svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
+svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
+svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
+svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
+svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
+svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
+svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
+svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
+svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
+svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
+svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
+svint8_t svsra(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
+svint32_t svsra(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
+svint64_t svsra(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
+svint16_t svsra(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
+svuint8_t svsra(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
+svuint32_t svsra(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
+svuint64_t svsra(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
+svuint16_t svsra(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
+svuint8_t svsri(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
+svuint32_t svsri(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
+svuint64_t svsri(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
+svuint16_t svsri(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
+svint8_t svsri(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
+svint32_t svsri(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
+svint64_t svsri(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
+svint16_t svsri(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
+void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
+void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
+void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
+void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
+void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
+void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
+void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
+void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
+void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
+void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
+void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
+void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
+void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
+void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
+void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
+void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
+void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
+void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
+void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
+void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
+void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
+void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
+void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
+void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
+void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
+void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
+void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
+void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
+void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
+void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
+void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
+void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
+void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
+void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
+void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
+void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
+void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
+void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
+void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
+void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
+void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
+void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
+void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
+svuint16_t svsubhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
+svuint32_t svsubhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
+svuint8_t svsubhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
+svint16_t svsubhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
+svint32_t svsubhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
+svint8_t svsubhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
+svuint16_t svsubhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
+svuint32_t svsubhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
+svuint8_t svsubhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
+svint16_t svsubhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
+svint32_t svsubhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
+svint8_t svsubhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
+svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
+svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
+svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
+svint16_t svsubhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
+svint32_t svsubhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
+svint8_t svsubhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
+svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
+svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
+svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
+svint16_t svsubhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
+svint32_t svsubhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
+svint8_t svsubhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
+svint32_t svsublb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
+svint64_t svsublb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
+svint16_t svsublb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
+svuint32_t svsublb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
+svuint64_t svsublb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
+svuint16_t svsublb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
+svint32_t svsublb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
+svint64_t svsublb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
+svint16_t svsublb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
+svuint32_t svsublb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
+svuint64_t svsublb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
+svuint16_t svsublb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
+svint32_t svsublbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
+svint64_t svsublbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
+svint16_t svsublbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
+svint32_t svsublbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
+svint64_t svsublbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
+svint16_t svsublbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
+svint32_t svsublt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
+svint64_t svsublt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
+svint16_t svsublt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
+svuint32_t svsublt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
+svuint64_t svsublt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
+svuint16_t svsublt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
+svint32_t svsublt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
+svint64_t svsublt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
+svint16_t svsublt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
+svuint32_t svsublt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
+svuint64_t svsublt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
+svuint16_t svsublt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
+svint32_t svsubltb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
+svint64_t svsubltb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
+svint16_t svsubltb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
+svint32_t svsubltb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
+svint64_t svsubltb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
+svint16_t svsubltb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
+svint32_t svsubwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
+svint64_t svsubwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
+svint16_t svsubwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
+svuint32_t svsubwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
+svuint64_t svsubwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
+svuint16_t svsubwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
+svint32_t svsubwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
+svint64_t svsubwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
+svint16_t svsubwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
+svuint32_t svsubwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
+svuint64_t svsubwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
+svuint16_t svsubwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
+svint32_t svsubwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
+svint64_t svsubwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
+svint16_t svsubwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
+svuint32_t svsubwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
+svuint64_t svsubwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
+svuint16_t svsubwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
+svint32_t svsubwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
+svint64_t svsubwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
+svint16_t svsubwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
+svuint32_t svsubwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
+svuint64_t svsubwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
+svuint16_t svsubwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
+svuint8_t svtbl2(svuint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
+svuint32_t svtbl2(svuint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
+svuint64_t svtbl2(svuint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
+svuint16_t svtbl2(svuint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
+svint8_t svtbl2(svint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
+svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
+svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
+svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
+svint32_t svtbl2(svint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
+svint64_t svtbl2(svint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
+svint16_t svtbl2(svint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
+svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
+svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
+svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
+svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
+svint8_t svtbx(svint8_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
+svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
+svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
+svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
+svint32_t svtbx(svint32_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
+svint64_t svtbx(svint64_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
+svint16_t svtbx(svint16_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
+svbool_t svwhilege_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
+svbool_t svwhilege_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
+svbool_t svwhilege_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
+svbool_t svwhilege_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
+svbool_t svwhilege_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
+svbool_t svwhilege_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
+svbool_t svwhilege_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
+svbool_t svwhilege_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
+svbool_t svwhilege_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
+svbool_t svwhilege_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
+svbool_t svwhilege_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
+svbool_t svwhilege_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
+svbool_t svwhilege_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
+svbool_t svwhilege_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
+svbool_t svwhilege_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
+svbool_t svwhilege_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
+svbool_t svwhilegt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
+svbool_t svwhilegt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
+svbool_t svwhilegt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
+svbool_t svwhilegt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
+svbool_t svwhilegt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
+svbool_t svwhilegt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
+svbool_t svwhilegt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
+svbool_t svwhilegt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
+svbool_t svwhilegt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
+svbool_t svwhilegt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
+svbool_t svwhilegt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
+svbool_t svwhilegt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
+svbool_t svwhilegt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
+svbool_t svwhilegt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
+svbool_t svwhilegt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
+svbool_t svwhilegt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
+svbool_t svwhilerw(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
+svbool_t svwhilerw(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
+svbool_t svwhilerw(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
+svbool_t svwhilerw(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
+svbool_t svwhilerw(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
+svbool_t svwhilerw(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
+svbool_t svwhilerw(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
+svbool_t svwhilerw(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
+svbool_t svwhilerw(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
+svbool_t svwhilerw(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
+svbool_t svwhilerw(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
+svbool_t svwhilewr(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
+svbool_t svwhilewr(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
+svbool_t svwhilewr(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
+svbool_t svwhilewr(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
+svbool_t svwhilewr(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
+svbool_t svwhilewr(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
+svbool_t svwhilewr(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
+svbool_t svwhilewr(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
+svbool_t svwhilewr(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
+svbool_t svwhilewr(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
+svbool_t svwhilewr(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
+svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
+svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
+svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
+svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
+svint8_t svxar(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
+svint32_t svxar(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
+svint64_t svxar(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
+svint16_t svxar(svint16_t, svint16_t, uint64_t);
+#endif  //defined(__ARM_FEATURE_SVE2)
+
+#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+#define svwhilerw_bf16(...) __builtin_sve_svwhilerw_bf16(__VA_ARGS__)
+#define svwhilewr_bf16(...) __builtin_sve_svwhilewr_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
+svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
+svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *);
+#endif  //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+
+#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
+#define svtbl2_bf16(...) __builtin_sve_svtbl2_bf16(__VA_ARGS__)
+#define svtbx_bf16(...) __builtin_sve_svtbx_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
+svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
+svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t);
+#endif  //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE2_AES)
+#define svaesd_u8(...) __builtin_sve_svaesd_u8(__VA_ARGS__)
+#define svaese_u8(...) __builtin_sve_svaese_u8(__VA_ARGS__)
+#define svaesimc_u8(...) __builtin_sve_svaesimc_u8(__VA_ARGS__)
+#define svaesmc_u8(...) __builtin_sve_svaesmc_u8(__VA_ARGS__)
+#define svpmullb_pair_n_u64(...) __builtin_sve_svpmullb_pair_n_u64(__VA_ARGS__)
+#define svpmullb_pair_u64(...) __builtin_sve_svpmullb_pair_u64(__VA_ARGS__)
+#define svpmullt_pair_n_u64(...) __builtin_sve_svpmullt_pair_n_u64(__VA_ARGS__)
+#define svpmullt_pair_u64(...) __builtin_sve_svpmullt_pair_u64(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
+svuint8_t svaesd(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
+svuint8_t svaese(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
+svuint8_t svaesimc(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
+svuint8_t svaesmc(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
+svuint64_t svpmullb_pair(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
+svuint64_t svpmullb_pair(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
+svuint64_t svpmullt_pair(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
+svuint64_t svpmullt_pair(svuint64_t, svuint64_t);
+#endif  //defined(__ARM_FEATURE_SVE2_AES)
+
+#if defined(__ARM_FEATURE_SVE2_SHA3)
+#define svrax1_u64(...) __builtin_sve_svrax1_u64(__VA_ARGS__)
+#define svrax1_s64(...) __builtin_sve_svrax1_s64(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
+svuint64_t svrax1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
+svint64_t svrax1(svint64_t, svint64_t);
+#endif  //defined(__ARM_FEATURE_SVE2_SHA3)
+
+#if defined(__ARM_FEATURE_SVE2_SM4)
+#define svsm4e_u32(...) __builtin_sve_svsm4e_u32(__VA_ARGS__)
+#define svsm4ekey_u32(...) __builtin_sve_svsm4ekey_u32(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
+svuint32_t svsm4e(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
+svuint32_t svsm4ekey(svuint32_t, svuint32_t);
+#endif  //defined(__ARM_FEATURE_SVE2_SM4)
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svbfdot_n_f32(...) __builtin_sve_svbfdot_n_f32(__VA_ARGS__)
+#define svbfdot_f32(...) __builtin_sve_svbfdot_f32(__VA_ARGS__)
+#define svbfdot_lane_f32(...) __builtin_sve_svbfdot_lane_f32(__VA_ARGS__)
+#define svbfmlalb_n_f32(...) __builtin_sve_svbfmlalb_n_f32(__VA_ARGS__)
+#define svbfmlalb_f32(...) __builtin_sve_svbfmlalb_f32(__VA_ARGS__)
+#define svbfmlalb_lane_f32(...) __builtin_sve_svbfmlalb_lane_f32(__VA_ARGS__)
+#define svbfmlalt_n_f32(...) __builtin_sve_svbfmlalt_n_f32(__VA_ARGS__)
+#define svbfmlalt_f32(...) __builtin_sve_svbfmlalt_f32(__VA_ARGS__)
+#define svbfmlalt_lane_f32(...) __builtin_sve_svbfmlalt_lane_f32(__VA_ARGS__)
+#define svbfmmla_f32(...) __builtin_sve_svbfmmla_f32(__VA_ARGS__)
+#define svclasta_n_bf16(...) __builtin_sve_svclasta_n_bf16(__VA_ARGS__)
+#define svclasta_bf16(...) __builtin_sve_svclasta_bf16(__VA_ARGS__)
+#define svclastb_n_bf16(...) __builtin_sve_svclastb_n_bf16(__VA_ARGS__)
+#define svclastb_bf16(...) __builtin_sve_svclastb_bf16(__VA_ARGS__)
+#define svcnt_bf16_m(...) __builtin_sve_svcnt_bf16_m(__VA_ARGS__)
+#define svcnt_bf16_x(...) __builtin_sve_svcnt_bf16_x(__VA_ARGS__)
+#define svcnt_bf16_z(...) __builtin_sve_svcnt_bf16_z(__VA_ARGS__)
+#define svcreate2_bf16(...) __builtin_sve_svcreate2_bf16(__VA_ARGS__)
+#define svcreate3_bf16(...) __builtin_sve_svcreate3_bf16(__VA_ARGS__)
+#define svcreate4_bf16(...) __builtin_sve_svcreate4_bf16(__VA_ARGS__)
+#define svcvt_bf16_f32_m(...) __builtin_sve_svcvt_bf16_f32_m(__VA_ARGS__)
+#define svcvt_bf16_f32_x(...) __builtin_sve_svcvt_bf16_f32_x(__VA_ARGS__)
+#define svcvt_bf16_f32_z(...) __builtin_sve_svcvt_bf16_f32_z(__VA_ARGS__)
+#define svcvtnt_bf16_f32_m(...) __builtin_sve_svcvtnt_bf16_f32_m(__VA_ARGS__)
+#define svdup_n_bf16(...) __builtin_sve_svdup_n_bf16(__VA_ARGS__)
+#define svdup_n_bf16_m(...) __builtin_sve_svdup_n_bf16_m(__VA_ARGS__)
+#define svdup_n_bf16_x(...) __builtin_sve_svdup_n_bf16_x(__VA_ARGS__)
+#define svdup_n_bf16_z(...) __builtin_sve_svdup_n_bf16_z(__VA_ARGS__)
+#define svdup_lane_bf16(...) __builtin_sve_svdup_lane_bf16(__VA_ARGS__)
+#define svdupq_n_bf16(...) __builtin_sve_svdupq_n_bf16(__VA_ARGS__)
+#define svdupq_lane_bf16(...) __builtin_sve_svdupq_lane_bf16(__VA_ARGS__)
+#define svext_bf16(...) __builtin_sve_svext_bf16(__VA_ARGS__)
+#define svget2_bf16(...) __builtin_sve_svget2_bf16(__VA_ARGS__)
+#define svget3_bf16(...) __builtin_sve_svget3_bf16(__VA_ARGS__)
+#define svget4_bf16(...) __builtin_sve_svget4_bf16(__VA_ARGS__)
+#define svinsr_n_bf16(...) __builtin_sve_svinsr_n_bf16(__VA_ARGS__)
+#define svlasta_bf16(...) __builtin_sve_svlasta_bf16(__VA_ARGS__)
+#define svlastb_bf16(...) __builtin_sve_svlastb_bf16(__VA_ARGS__)
+#define svld1_bf16(...) __builtin_sve_svld1_bf16(__VA_ARGS__)
+#define svld1_vnum_bf16(...) __builtin_sve_svld1_vnum_bf16(__VA_ARGS__)
+#define svld1rq_bf16(...) __builtin_sve_svld1rq_bf16(__VA_ARGS__)
+#define svld2_bf16(...) __builtin_sve_svld2_bf16(__VA_ARGS__)
+#define svld2_vnum_bf16(...) __builtin_sve_svld2_vnum_bf16(__VA_ARGS__)
+#define svld3_bf16(...) __builtin_sve_svld3_bf16(__VA_ARGS__)
+#define svld3_vnum_bf16(...) __builtin_sve_svld3_vnum_bf16(__VA_ARGS__)
+#define svld4_bf16(...) __builtin_sve_svld4_bf16(__VA_ARGS__)
+#define svld4_vnum_bf16(...) __builtin_sve_svld4_vnum_bf16(__VA_ARGS__)
+#define svldff1_bf16(...) __builtin_sve_svldff1_bf16(__VA_ARGS__)
+#define svldff1_vnum_bf16(...) __builtin_sve_svldff1_vnum_bf16(__VA_ARGS__)
+#define svldnf1_bf16(...) __builtin_sve_svldnf1_bf16(__VA_ARGS__)
+#define svldnf1_vnum_bf16(...) __builtin_sve_svldnf1_vnum_bf16(__VA_ARGS__)
+#define svldnt1_bf16(...) __builtin_sve_svldnt1_bf16(__VA_ARGS__)
+#define svldnt1_vnum_bf16(...) __builtin_sve_svldnt1_vnum_bf16(__VA_ARGS__)
+#define svlen_bf16(...) __builtin_sve_svlen_bf16(__VA_ARGS__)
+#define svrev_bf16(...) __builtin_sve_svrev_bf16(__VA_ARGS__)
+#define svsel_bf16(...) __builtin_sve_svsel_bf16(__VA_ARGS__)
+#define svset2_bf16(...) __builtin_sve_svset2_bf16(__VA_ARGS__)
+#define svset3_bf16(...) __builtin_sve_svset3_bf16(__VA_ARGS__)
+#define svset4_bf16(...) __builtin_sve_svset4_bf16(__VA_ARGS__)
+#define svsplice_bf16(...) __builtin_sve_svsplice_bf16(__VA_ARGS__)
+#define svst1_bf16(...) __builtin_sve_svst1_bf16(__VA_ARGS__)
+#define svst1_vnum_bf16(...) __builtin_sve_svst1_vnum_bf16(__VA_ARGS__)
+#define svst2_bf16(...) __builtin_sve_svst2_bf16(__VA_ARGS__)
+#define svst2_vnum_bf16(...) __builtin_sve_svst2_vnum_bf16(__VA_ARGS__)
+#define svst3_bf16(...) __builtin_sve_svst3_bf16(__VA_ARGS__)
+#define svst3_vnum_bf16(...) __builtin_sve_svst3_vnum_bf16(__VA_ARGS__)
+#define svst4_bf16(...) __builtin_sve_svst4_bf16(__VA_ARGS__)
+#define svst4_vnum_bf16(...) __builtin_sve_svst4_vnum_bf16(__VA_ARGS__)
+#define svstnt1_bf16(...) __builtin_sve_svstnt1_bf16(__VA_ARGS__)
+#define svstnt1_vnum_bf16(...) __builtin_sve_svstnt1_vnum_bf16(__VA_ARGS__)
+#define svtbl_bf16(...) __builtin_sve_svtbl_bf16(__VA_ARGS__)
+#define svtrn1_bf16(...) __builtin_sve_svtrn1_bf16(__VA_ARGS__)
+#define svtrn2_bf16(...) __builtin_sve_svtrn2_bf16(__VA_ARGS__)
+#define svundef2_bf16(...) __builtin_sve_svundef2_bf16(__VA_ARGS__)
+#define svundef3_bf16(...) __builtin_sve_svundef3_bf16(__VA_ARGS__)
+#define svundef4_bf16(...) __builtin_sve_svundef4_bf16(__VA_ARGS__)
+#define svundef_bf16(...) __builtin_sve_svundef_bf16(__VA_ARGS__)
+#define svuzp1_bf16(...) __builtin_sve_svuzp1_bf16(__VA_ARGS__)
+#define svuzp2_bf16(...) __builtin_sve_svuzp2_bf16(__VA_ARGS__)
+#define svzip1_bf16(...) __builtin_sve_svzip1_bf16(__VA_ARGS__)
+#define svzip2_bf16(...) __builtin_sve_svzip2_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
+svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
+svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
+svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
+svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
+svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
+svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
+svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
+svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
+svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
+svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
+bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
+svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
+bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
+svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
+svuint16_t svcnt_x(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
+svuint16_t svcnt_z(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
+svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
+svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
+svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
+svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
+svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
+svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
+svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
+svbfloat16_t svdup_bf16(bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
+svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
+svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
+svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
+svbfloat16_t svdup_lane(svbfloat16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
+svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
+svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
+svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
+svbfloat16_t svget2(svbfloat16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
+svbfloat16_t svget3(svbfloat16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
+svbfloat16_t svget4(svbfloat16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
+svbfloat16_t svinsr(svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
+bfloat16_t svlasta(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
+bfloat16_t svlastb(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
+svbfloat16_t svld1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
+svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
+svbfloat16_t svld1rq(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
+svbfloat16x2_t svld2(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
+svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
+svbfloat16x3_t svld3(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
+svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
+svbfloat16x4_t svld4(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
+svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
+svbfloat16_t svldff1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
+svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
+svbfloat16_t svldnf1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
+svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
+svbfloat16_t svldnt1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
+svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
+uint64_t svlen(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
+svbfloat16_t svrev(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
+svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
+svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
+svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
+svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
+svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
+void svst1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
+void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
+void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
+void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
+void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
+void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
+void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
+void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
+void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
+void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
+svbfloat16_t svtbl(svbfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
+svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
+svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
+svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
+svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
+svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
+svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);
+#endif  //defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP32)
+#define svmmla_f32(...) __builtin_sve_svmmla_f32(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
+svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP32)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP64)
+#define svld1ro_u8(...) __builtin_sve_svld1ro_u8(__VA_ARGS__)
+#define svld1ro_u32(...) __builtin_sve_svld1ro_u32(__VA_ARGS__)
+#define svld1ro_u64(...) __builtin_sve_svld1ro_u64(__VA_ARGS__)
+#define svld1ro_u16(...) __builtin_sve_svld1ro_u16(__VA_ARGS__)
+#define svld1ro_s8(...) __builtin_sve_svld1ro_s8(__VA_ARGS__)
+#define svld1ro_f64(...) __builtin_sve_svld1ro_f64(__VA_ARGS__)
+#define svld1ro_f32(...) __builtin_sve_svld1ro_f32(__VA_ARGS__)
+#define svld1ro_f16(...) __builtin_sve_svld1ro_f16(__VA_ARGS__)
+#define svld1ro_s32(...) __builtin_sve_svld1ro_s32(__VA_ARGS__)
+#define svld1ro_s64(...) __builtin_sve_svld1ro_s64(__VA_ARGS__)
+#define svld1ro_s16(...) __builtin_sve_svld1ro_s16(__VA_ARGS__)
+#define svmmla_f64(...) __builtin_sve_svmmla_f64(__VA_ARGS__)
+#define svtrn1q_u8(...) __builtin_sve_svtrn1q_u8(__VA_ARGS__)
+#define svtrn1q_u32(...) __builtin_sve_svtrn1q_u32(__VA_ARGS__)
+#define svtrn1q_u64(...) __builtin_sve_svtrn1q_u64(__VA_ARGS__)
+#define svtrn1q_u16(...) __builtin_sve_svtrn1q_u16(__VA_ARGS__)
+#define svtrn1q_s8(...) __builtin_sve_svtrn1q_s8(__VA_ARGS__)
+#define svtrn1q_f64(...) __builtin_sve_svtrn1q_f64(__VA_ARGS__)
+#define svtrn1q_f32(...) __builtin_sve_svtrn1q_f32(__VA_ARGS__)
+#define svtrn1q_f16(...) __builtin_sve_svtrn1q_f16(__VA_ARGS__)
+#define svtrn1q_s32(...) __builtin_sve_svtrn1q_s32(__VA_ARGS__)
+#define svtrn1q_s64(...) __builtin_sve_svtrn1q_s64(__VA_ARGS__)
+#define svtrn1q_s16(...) __builtin_sve_svtrn1q_s16(__VA_ARGS__)
+#define svtrn2q_u8(...) __builtin_sve_svtrn2q_u8(__VA_ARGS__)
+#define svtrn2q_u32(...) __builtin_sve_svtrn2q_u32(__VA_ARGS__)
+#define svtrn2q_u64(...) __builtin_sve_svtrn2q_u64(__VA_ARGS__)
+#define svtrn2q_u16(...) __builtin_sve_svtrn2q_u16(__VA_ARGS__)
+#define svtrn2q_s8(...) __builtin_sve_svtrn2q_s8(__VA_ARGS__)
+#define svtrn2q_f64(...) __builtin_sve_svtrn2q_f64(__VA_ARGS__)
+#define svtrn2q_f32(...) __builtin_sve_svtrn2q_f32(__VA_ARGS__)
+#define svtrn2q_f16(...) __builtin_sve_svtrn2q_f16(__VA_ARGS__)
+#define svtrn2q_s32(...) __builtin_sve_svtrn2q_s32(__VA_ARGS__)
+#define svtrn2q_s64(...) __builtin_sve_svtrn2q_s64(__VA_ARGS__)
+#define svtrn2q_s16(...) __builtin_sve_svtrn2q_s16(__VA_ARGS__)
+#define svuzp1q_u8(...) __builtin_sve_svuzp1q_u8(__VA_ARGS__)
+#define svuzp1q_u32(...) __builtin_sve_svuzp1q_u32(__VA_ARGS__)
+#define svuzp1q_u64(...) __builtin_sve_svuzp1q_u64(__VA_ARGS__)
+#define svuzp1q_u16(...) __builtin_sve_svuzp1q_u16(__VA_ARGS__)
+#define svuzp1q_s8(...) __builtin_sve_svuzp1q_s8(__VA_ARGS__)
+#define svuzp1q_f64(...) __builtin_sve_svuzp1q_f64(__VA_ARGS__)
+#define svuzp1q_f32(...) __builtin_sve_svuzp1q_f32(__VA_ARGS__)
+#define svuzp1q_f16(...) __builtin_sve_svuzp1q_f16(__VA_ARGS__)
+#define svuzp1q_s32(...) __builtin_sve_svuzp1q_s32(__VA_ARGS__)
+#define svuzp1q_s64(...) __builtin_sve_svuzp1q_s64(__VA_ARGS__)
+#define svuzp1q_s16(...) __builtin_sve_svuzp1q_s16(__VA_ARGS__)
+#define svuzp2q_u8(...) __builtin_sve_svuzp2q_u8(__VA_ARGS__)
+#define svuzp2q_u32(...) __builtin_sve_svuzp2q_u32(__VA_ARGS__)
+#define svuzp2q_u64(...) __builtin_sve_svuzp2q_u64(__VA_ARGS__)
+#define svuzp2q_u16(...) __builtin_sve_svuzp2q_u16(__VA_ARGS__)
+#define svuzp2q_s8(...) __builtin_sve_svuzp2q_s8(__VA_ARGS__)
+#define svuzp2q_f64(...) __builtin_sve_svuzp2q_f64(__VA_ARGS__)
+#define svuzp2q_f32(...) __builtin_sve_svuzp2q_f32(__VA_ARGS__)
+#define svuzp2q_f16(...) __builtin_sve_svuzp2q_f16(__VA_ARGS__)
+#define svuzp2q_s32(...) __builtin_sve_svuzp2q_s32(__VA_ARGS__)
+#define svuzp2q_s64(...) __builtin_sve_svuzp2q_s64(__VA_ARGS__)
+#define svuzp2q_s16(...) __builtin_sve_svuzp2q_s16(__VA_ARGS__)
+#define svzip1q_u8(...) __builtin_sve_svzip1q_u8(__VA_ARGS__)
+#define svzip1q_u32(...) __builtin_sve_svzip1q_u32(__VA_ARGS__)
+#define svzip1q_u64(...) __builtin_sve_svzip1q_u64(__VA_ARGS__)
+#define svzip1q_u16(...) __builtin_sve_svzip1q_u16(__VA_ARGS__)
+#define svzip1q_s8(...) __builtin_sve_svzip1q_s8(__VA_ARGS__)
+#define svzip1q_f64(...) __builtin_sve_svzip1q_f64(__VA_ARGS__)
+#define svzip1q_f32(...) __builtin_sve_svzip1q_f32(__VA_ARGS__)
+#define svzip1q_f16(...) __builtin_sve_svzip1q_f16(__VA_ARGS__)
+#define svzip1q_s32(...) __builtin_sve_svzip1q_s32(__VA_ARGS__)
+#define svzip1q_s64(...) __builtin_sve_svzip1q_s64(__VA_ARGS__)
+#define svzip1q_s16(...) __builtin_sve_svzip1q_s16(__VA_ARGS__)
+#define svzip2q_u8(...) __builtin_sve_svzip2q_u8(__VA_ARGS__)
+#define svzip2q_u32(...) __builtin_sve_svzip2q_u32(__VA_ARGS__)
+#define svzip2q_u64(...) __builtin_sve_svzip2q_u64(__VA_ARGS__)
+#define svzip2q_u16(...) __builtin_sve_svzip2q_u16(__VA_ARGS__)
+#define svzip2q_s8(...) __builtin_sve_svzip2q_s8(__VA_ARGS__)
+#define svzip2q_f64(...) __builtin_sve_svzip2q_f64(__VA_ARGS__)
+#define svzip2q_f32(...) __builtin_sve_svzip2q_f32(__VA_ARGS__)
+#define svzip2q_f16(...) __builtin_sve_svzip2q_f16(__VA_ARGS__)
+#define svzip2q_s32(...) __builtin_sve_svzip2q_s32(__VA_ARGS__)
+#define svzip2q_s64(...) __builtin_sve_svzip2q_s64(__VA_ARGS__)
+#define svzip2q_s16(...) __builtin_sve_svzip2q_s16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
+svuint8_t svld1ro(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
+svuint32_t svld1ro(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
+svuint64_t svld1ro(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
+svuint16_t svld1ro(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
+svint8_t svld1ro(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
+svfloat64_t svld1ro(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
+svfloat32_t svld1ro(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
+svfloat16_t svld1ro(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
+svint32_t svld1ro(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
+svint64_t svld1ro(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
+svint16_t svld1ro(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
+svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
+svuint8_t svtrn1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
+svuint32_t svtrn1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
+svuint64_t svtrn1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
+svuint16_t svtrn1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
+svint8_t svtrn1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
+svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
+svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
+svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
+svint32_t svtrn1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
+svint64_t svtrn1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
+svint16_t svtrn1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
+svuint8_t svtrn2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
+svuint32_t svtrn2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
+svuint64_t svtrn2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
+svuint16_t svtrn2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
+svint8_t svtrn2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
+svfloat64_t svtrn2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
+svfloat32_t svtrn2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
+svfloat16_t svtrn2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
+svint32_t svtrn2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
+svint64_t svtrn2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
+svint16_t svtrn2q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
+svuint8_t svuzp1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
+svuint32_t svuzp1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
+svuint64_t svuzp1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
+svuint16_t svuzp1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
+svint8_t svuzp1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
+svfloat64_t svuzp1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
+svfloat32_t svuzp1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
+svfloat16_t svuzp1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
+svint32_t svuzp1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
+svint64_t svuzp1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
+svint16_t svuzp1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
+svuint8_t svuzp2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
+svuint32_t svuzp2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
+svuint64_t svuzp2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
+svuint16_t svuzp2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
+svint8_t svuzp2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
+svfloat64_t svuzp2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
+svfloat32_t svuzp2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
+svfloat16_t svuzp2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
+svint32_t svuzp2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
+svint64_t svuzp2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
+svint16_t svuzp2q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
+svuint8_t svzip1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
+svuint32_t svzip1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
+svuint64_t svzip1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
+svuint16_t svzip1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
+svint8_t svzip1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
+svfloat64_t svzip1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
+svfloat32_t svzip1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
+svfloat16_t svzip1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
+svint32_t svzip1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
+svint64_t svzip1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
+svint16_t svzip1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
+svuint8_t svzip2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
+svuint32_t svzip2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
+svuint64_t svzip2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
+svuint16_t svzip2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
+svint8_t svzip2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
+svfloat64_t svzip2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
+svfloat32_t svzip2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
+svfloat16_t svzip2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
+svint32_t svzip2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
+svint64_t svzip2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
+svint16_t svzip2q(svint16_t, svint16_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP64)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
+#define svld1ro_bf16(...) __builtin_sve_svld1ro_bf16(__VA_ARGS__)
+#define svtrn1q_bf16(...) __builtin_sve_svtrn1q_bf16(__VA_ARGS__)
+#define svtrn2q_bf16(...) __builtin_sve_svtrn2q_bf16(__VA_ARGS__)
+#define svuzp1q_bf16(...) __builtin_sve_svuzp1q_bf16(__VA_ARGS__)
+#define svuzp2q_bf16(...) __builtin_sve_svuzp2q_bf16(__VA_ARGS__)
+#define svzip1q_bf16(...) __builtin_sve_svzip1q_bf16(__VA_ARGS__)
+#define svzip2q_bf16(...) __builtin_sve_svzip2q_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
+svbfloat16_t svld1ro(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
+svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
+svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
+svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
+svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
+svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
+svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_INT8)
+#define svmmla_s32(...) __builtin_sve_svmmla_s32(__VA_ARGS__)
+#define svmmla_u32(...) __builtin_sve_svmmla_u32(__VA_ARGS__)
+#define svsudot_n_s32(...) __builtin_sve_svsudot_n_s32(__VA_ARGS__)
+#define svsudot_s32(...) __builtin_sve_svsudot_s32(__VA_ARGS__)
+#define svsudot_lane_s32(...) __builtin_sve_svsudot_lane_s32(__VA_ARGS__)
+#define svusdot_n_s32(...) __builtin_sve_svusdot_n_s32(__VA_ARGS__)
+#define svusdot_s32(...) __builtin_sve_svusdot_s32(__VA_ARGS__)
+#define svusdot_lane_s32(...) __builtin_sve_svusdot_lane_s32(__VA_ARGS__)
+#define svusmmla_s32(...) __builtin_sve_svusmmla_s32(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
+svint32_t svmmla(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
+svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
+svint32_t svsudot(svint32_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
+svint32_t svsudot(svint32_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
+svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
+svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
+svint32_t svusmmla(svint32_t, svuint8_t, svint8_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_INT8)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svcvtnt_bf16_x      svcvtnt_bf16_m
+#define svcvtnt_bf16_f32_x  svcvtnt_bf16_f32_m
+#endif /*__ARM_FEATURE_SVE_BF16 */
+
+#if defined(__ARM_FEATURE_SVE2)
+#define svcvtnt_f16_x      svcvtnt_f16_m
+#define svcvtnt_f16_f32_x  svcvtnt_f16_f32_m
+#define svcvtnt_f32_x      svcvtnt_f32_m
+#define svcvtnt_f32_f64_x  svcvtnt_f32_f64_m
+
+#define svcvtxnt_f32_x     svcvtxnt_f32_m
+#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m
+
+#endif /*__ARM_FEATURE_SVE2 */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /*__ARM_FEATURE_SVE */
+
+#endif /* __ARM_SVE_H */
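
A minimal usage sketch for the SVE intrinsics added above, assuming a toolchain that defines __ARM_FEATURE_SVE_MATMUL_INT8; the function name and any build flags are illustrative, not part of the toolchain itself:

  #include <arm_sve.h>

  #if defined(__ARM_FEATURE_SVE_MATMUL_INT8)
  /* svmmla (declared above) multiplies the 2x8 int8 sub-matrices held in
     each 128-bit segment of a and b and accumulates 2x2 int32 results. */
  svint32_t accumulate_i8_tile(svint32_t acc, svint8_t a, svint8_t b) {
    return svmmla(acc, a, b);
  }
  #endif
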
diff --git a/darwin-x86/lib64/clang/11.0.1/include/armintr.h b/darwin-x86/lib64/clang/11.0.5/include/armintr.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/armintr.h
rename to darwin-x86/lib64/clang/11.0.5/include/armintr.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx2intrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx2intrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/avx2intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx2intrin.h
index 162e83e..cc16720 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx2intrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/avx2intrin.h
@@ -740,6 +740,8 @@
   return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
 }
 
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+
 #define _mm_blend_epi32(V1, V2, M) \
   (__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
                                      (__v4si)(__m128i)(V2), (int)(M))
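
The _mm_broadcastsi128_si256 define added above is an alias matching the spelling in Intel's documentation. A hypothetical caller, assuming AVX2 is enabled (e.g. -mavx2):

  #include <immintrin.h>

  /* Both 128-bit halves of the result hold a copy of x. */
  __m256i duplicate_lane(__m128i x) {
    return _mm_broadcastsi128_si256(x);
  }
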
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512bf16intrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512bf16intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512bitalgintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512bitalgintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512bwintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512bwintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
index 3765584..4281a33 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx512bwintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
@@ -1504,13 +1504,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, int __B)
+_mm512_slli_epi16(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1518,7 +1519,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1595,13 +1596,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, int __B)
+_mm512_srai_epi16(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1609,7 +1611,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1639,13 +1641,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, int __B)
+_mm512_srli_epi16(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_srli_epi16(__A, __B),
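
The avx512bwintrin.h hunk above changes the shift-count parameter of the word-granularity shift intrinsics from int to unsigned int. A sketch, assuming AVX-512BW is enabled; the function name is illustrative:

  #include <immintrin.h>

  /* Masked shift: lanes with a clear mask bit keep their value from w. */
  __m512i shift_low_words(__m512i w, __m512i v, unsigned int n) {
    return _mm512_mask_slli_epi16(w, (__mmask32)0xFFFF, v, n);
  }
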
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512cdintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512cdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512cdintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512cdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512dqintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512dqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512dqintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512dqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512erintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512erintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512erintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512fintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512fintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512fintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512fintrin.h
index 7465da3..fa22ef3 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx512fintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/avx512fintrin.h
@@ -5111,13 +5111,14 @@
                                       (__v8di)_mm512_setzero_si512())
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, int __B)
+_mm512_slli_epi32(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_slli_epi32(__A, __B),
@@ -5125,20 +5126,20 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_slli_epi32(__A, __B),
                                          (__v16si)_mm512_setzero_si512());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, int __B)
+_mm512_slli_epi64(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_slli_epi64(__A, __B),
@@ -5146,7 +5147,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_slli_epi64(__A, __B),
@@ -5154,13 +5155,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, int __B)
+_mm512_srli_epi32(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srli_epi32(__A, __B),
@@ -5168,20 +5170,21 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srli_epi32(__A, __B),
                                          (__v16si)_mm512_setzero_si512());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, int __B)
+_mm512_srli_epi64(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srli_epi64(__A, __B),
@@ -5189,7 +5192,8 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
+                        unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srli_epi64(__A, __B),
@@ -6593,13 +6597,14 @@
                                              (int)(R))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, int __B)
+_mm512_srai_epi32(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srai_epi32(__A, __B),
@@ -6607,20 +6612,21 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
+                        unsigned int __B) {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srai_epi32(__A, __B),
                                          (__v16si)_mm512_setzero_si512());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, int __B)
+_mm512_srai_epi64(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srai_epi64(__A, __B),
@@ -6628,7 +6634,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srai_epi64(__A, __B),
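
avx512fintrin.h gets the same signature change for the dword/qword shifts. A sketch of the zero-masking form, assuming -mavx512f; the name is illustrative:

  #include <immintrin.h>

  /* Shift only the low 8 of 16 dwords right; the high 8 become zero. */
  __m512i srl_low_dwords(__m512i v, unsigned int n) {
    return _mm512_maskz_srli_epi32((__mmask16)0x00FF, v, n);
  }
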
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512ifmaintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512ifmaintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512ifmavlintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512ifmavlintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512pfintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512pfintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512pfintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512pfintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vbmi2intrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vbmi2intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vbmiintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vbmiintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vbmivlintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vbmivlintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlbf16intrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlbf16intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlbitalgintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlbitalgintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlbwintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlbwintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
index cd9f240..6ed10ed 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx512vlbwintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
@@ -1939,7 +1939,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_slli_epi16(__A, __B),
@@ -1947,7 +1947,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_slli_epi16(__A, __B),
@@ -1955,7 +1955,8 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+                       unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_slli_epi16(__A, __B),
@@ -1963,7 +1964,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_slli_epi16(__A, __B),
@@ -2091,7 +2092,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_srai_epi16(__A, __B),
@@ -2099,7 +2100,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_srai_epi16(__A, __B),
@@ -2107,7 +2108,8 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+                       unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_srai_epi16(__A, __B),
@@ -2115,7 +2117,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_srai_epi16(__A, __B),
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlcdintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlcdintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vldqintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vldqintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
index 9d1d791..968c10e 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx512vlintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
@@ -4522,7 +4522,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_slli_epi32(__A, __B),
@@ -4530,7 +4530,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_slli_epi32(__A, __B),
@@ -4538,7 +4538,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_slli_epi32(__A, __B),
@@ -4546,7 +4546,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_slli_epi32(__A, __B),
@@ -4586,7 +4586,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_slli_epi64(__A, __B),
@@ -4594,7 +4594,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_slli_epi64(__A, __B),
@@ -4602,7 +4602,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_slli_epi64(__A, __B),
@@ -4610,7 +4610,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_slli_epi64(__A, __B),
@@ -4866,7 +4866,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srli_epi32(__A, __B),
@@ -4874,7 +4874,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srli_epi32(__A, __B),
@@ -4882,7 +4882,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srli_epi32(__A, __B),
@@ -4890,7 +4890,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srli_epi32(__A, __B),
@@ -4930,7 +4930,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_srli_epi64(__A, __B),
@@ -4938,7 +4938,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_srli_epi64(__A, __B),
@@ -4946,7 +4946,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_srli_epi64(__A, __B),
@@ -4954,7 +4954,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_srli_epi64(__A, __B),
@@ -6405,7 +6405,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srai_epi32(__A, __B),
@@ -6413,7 +6413,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srai_epi32(__A, __B),
@@ -6421,7 +6421,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srai_epi32(__A, __B),
@@ -6429,7 +6429,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srai_epi32(__A, __B),
@@ -6481,13 +6481,13 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, int __imm)
+_mm_srai_epi64(__m128i __A, unsigned int __imm)
 {
   return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
+_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
                                            (__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6495,7 +6495,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
                                            (__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6503,13 +6503,14 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, int __imm)
+_mm256_srai_epi64(__m256i __A, unsigned int __imm)
 {
   return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
+_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+                       unsigned int __imm)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
                                         (__v4di)_mm256_srai_epi64(__A, __imm), \
@@ -6517,7 +6518,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
                                         (__v4di)_mm256_srai_epi64(__A, __imm), \
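
The 128/256-bit forms in avx512vlintrin.h take the same unsigned shift count. For example, assuming AVX-512F plus AVX-512VL are enabled:

  #include <immintrin.h>

  /* Arithmetic right shift of four 64-bit lanes, sign-filling. */
  __m256i sra_quads(__m256i v, unsigned int n) {
    return _mm256_srai_epi64(v, n);
  }
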
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlvbmi2intrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlvbmi2intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlvnniintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlvnniintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vlvp2intersectintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vlvp2intersectintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vnniintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vnniintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vp2intersectintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vp2intersectintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vpopcntdqintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vpopcntdqintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512vpopcntdqvlintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avx512vpopcntdqvlintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avxintrin.h b/darwin-x86/lib64/clang/11.0.5/include/avxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/avxintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/avxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/bits/stdatomic.h b/darwin-x86/lib64/clang/11.0.5/include/bits/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/bits/stdatomic.h
rename to darwin-x86/lib64/clang/11.0.5/include/bits/stdatomic.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/bmi2intrin.h b/darwin-x86/lib64/clang/11.0.5/include/bmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/bmi2intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/bmiintrin.h b/darwin-x86/lib64/clang/11.0.5/include/bmiintrin.h
similarity index 86%
rename from darwin-x86/lib64/clang/11.0.1/include/bmiintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/bmiintrin.h
index 841bd84..f583c21 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/bmiintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/bmiintrin.h
@@ -111,7 +111,8 @@
 
 #undef __RELAXED_FN_ATTRS
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__BMI__)
 
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
@@ -192,6 +193,28 @@
   return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
 }
 
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
+///    specify the index of the least significant bit. Bits [15:8] specify the
+///    number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr2_u32(unsigned int __X, unsigned int __Y) {
+  return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
 /// Clears all bits in the source except for the least significant bit
 ///    containing a value of 1 and returns the result.
 ///
@@ -321,6 +344,28 @@
   return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
 }
 
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
+///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
+///    the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr2_u64(unsigned long long __X, unsigned long long __Y) {
+  return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
 /// Clears all bits in the source except for the least significant bit
 ///    containing a value of 1 and returns the result.
 ///
@@ -376,6 +421,7 @@
 
 #undef __DEFAULT_FN_ATTRS
 
-#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules)   \
+          || defined(__BMI__) */
 
 #endif /* __BMIINTRIN_H */
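
The _bextr2_u32/_bextr2_u64 intrinsics added above take the BEXTR control word pre-packed, matching the existing double-underscore __bextr_u32 but with the Intel-documented single-underscore name. A sketch, assuming -mbmi; the helper name is illustrative:

  #include <x86intrin.h>

  /* Extract len bits starting at bit start, per the BEXTR encoding:
     control bits [7:0] = start, bits [15:8] = length. */
  unsigned int extract_field(unsigned int word, unsigned int start,
                             unsigned int len) {
    unsigned int ctl = (start & 0xff) | ((len & 0xff) << 8);
    return _bextr2_u32(word, ctl);
  }
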
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cet.h b/darwin-x86/lib64/clang/11.0.5/include/cet.h
new file mode 100644
index 0000000..ffb19de
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/cet.h
@@ -0,0 +1,66 @@
+/*===------ cet.h - Control-flow Enforcement Technology feature ------------===
+ * Adds the x86 feature bits for IBT and/or SHSTK to the ELF program property
+ * if they are enabled; otherwise the contents of this header are unused. This
+ * file is mainly designed for assembly source code that wants to enable CET.
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CET_H
+#define __CET_H
+
+#ifdef __ASSEMBLER__
+
+#ifndef __CET__
+# define _CET_ENDBR
+#endif
+
+#ifdef __CET__
+
+# ifdef __LP64__
+#  if __CET__ & 0x1
+#    define _CET_ENDBR endbr64
+#  else
+#    define _CET_ENDBR
+#  endif
+# else
+#  if __CET__ & 0x1
+#    define _CET_ENDBR endbr32
+#  else
+#    define _CET_ENDBR
+#  endif
+# endif
+
+#  ifdef __LP64__
+#   define __PROPERTY_ALIGN 3
+#  else
+#   define __PROPERTY_ALIGN 2
+#  endif
+
+	.pushsection ".note.gnu.property", "a"
+	.p2align __PROPERTY_ALIGN
+	.long 1f - 0f		/* name length.  */
+	.long 4f - 1f		/* data length.  */
+	/* NT_GNU_PROPERTY_TYPE_0.   */
+	.long 5			/* note type.  */
+0:
+	.asciz "GNU"		/* vendor name.  */
+1:
+	.p2align __PROPERTY_ALIGN
+	/* GNU_PROPERTY_X86_FEATURE_1_AND.  */
+	.long 0xc0000002	/* pr_type.  */
+	.long 3f - 2f		/* pr_datasz.  */
+2:
+	/* GNU_PROPERTY_X86_FEATURE_1_XXX.  */
+	.long __CET__
+3:
+	.p2align __PROPERTY_ALIGN
+4:
+	.popsection
+#endif
+#endif
+#endif
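+
+/* Usage sketch (illustrative): an assembly file that supports CET includes
+ * this header and marks each indirect-branch target:
+ *
+ *   #include <cet.h>
+ *           .text
+ *           .globl  my_func
+ *   my_func:
+ *           _CET_ENDBR
+ *           ret
+ *
+ * When built with -fcf-protection, the compiler defines __CET__ and
+ * _CET_ENDBR expands to endbr64 (endbr32 on 32-bit targets); otherwise it
+ * expands to nothing.
+ */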
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cetintrin.h b/darwin-x86/lib64/clang/11.0.5/include/cetintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/cetintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/cetintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cldemoteintrin.h b/darwin-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
similarity index 73%
rename from darwin-x86/lib64/clang/11.0.1/include/cldemoteintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
index 2413e7d..cfb951c 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/cldemoteintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
@@ -18,11 +18,19 @@
 #define __DEFAULT_FN_ATTRS \
   __attribute__((__always_inline__, __nodebug__,  __target__("cldemote")))
 
+/// Hint to hardware that the cache line that contains \p __P should be demoted
+/// from the cache closest to the processor core to a level more distant from
+/// the processor core.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CLDEMOTE </c> instruction.
 static __inline__ void __DEFAULT_FN_ATTRS
 _cldemote(const void * __P) {
   __builtin_ia32_cldemote(__P);
 }
 
+#define _mm_cldemote(p) _cldemote(p)
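+
+/* Usage sketch (illustrative): after filling a buffer that another core will
+ * consume, hint that its cache line may move to a more distant cache level:
+ *
+ *   _cldemote(buf);        // direct form
+ *   _mm_cldemote(buf);     // Intel-style alias defined above
+ */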
 #undef __DEFAULT_FN_ATTRS
 
 #endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/clflushoptintrin.h b/darwin-x86/lib64/clang/11.0.5/include/clflushoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/clflushoptintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/clflushoptintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/clwbintrin.h b/darwin-x86/lib64/clang/11.0.5/include/clwbintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/clwbintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/clwbintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/clzerointrin.h b/darwin-x86/lib64/clang/11.0.5/include/clzerointrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/clzerointrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cpuid.h b/darwin-x86/lib64/clang/11.0.5/include/cpuid.h
similarity index 96%
rename from darwin-x86/lib64/clang/11.0.1/include/cpuid.h
rename to darwin-x86/lib64/clang/11.0.5/include/cpuid.h
index 4ddd648..2a88c04 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/cpuid.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/cpuid.h
@@ -24,6 +24,10 @@
 #define signature_CYRIX_ebx 0x69727943
 #define signature_CYRIX_edx 0x736e4978
 #define signature_CYRIX_ecx 0x64616574
+/* HYGON:   "HygonGenuine" */
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
 /* INTEL:   "GenuineIntel" */
 #define signature_INTEL_ebx 0x756e6547
 #define signature_INTEL_edx 0x49656e69
@@ -182,8 +186,13 @@
 /* Features in %edx for leaf 7 sub-leaf 0 */
 #define bit_AVX5124VNNIW  0x00000004
 #define bit_AVX5124FMAPS  0x00000008
+#define bit_SERIALIZE     0x00004000
+#define bit_TSXLDTRK      0x00010000
 #define bit_PCONFIG       0x00040000
 #define bit_IBT           0x00100000
+#define bit_AMXBF16       0x00400000
+#define bit_AMXTILE       0x01000000
+#define bit_AMXINT8       0x02000000
 
 /* Features in %eax for leaf 7 sub-leaf 1 */
 #define bit_AVX512BF16    0x00000020
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cuda_wrappers/algorithm b/darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/cuda_wrappers/algorithm
rename to darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cuda_wrappers/complex b/darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/cuda_wrappers/complex
rename to darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cuda_wrappers/new b/darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/cuda_wrappers/new
rename to darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/11.0.1/include/emmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/emmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/emmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/emmintrin.h
index 993c688..73a777b 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/emmintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/emmintrin.h
@@ -4970,10 +4970,10 @@
 
 #define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
 
-#define _MM_DENORMALS_ZERO_ON   (0x0040)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000)
+#define _MM_DENORMALS_ZERO_ON   (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF  (0x0000U)
 
-#define _MM_DENORMALS_ZERO_MASK (0x0040)
+#define _MM_DENORMALS_ZERO_MASK (0x0040U)
 
 #define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
 #define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
diff --git a/darwin-x86/lib64/clang/11.0.1/include/enqcmdintrin.h b/darwin-x86/lib64/clang/11.0.5/include/enqcmdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/enqcmdintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/enqcmdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/f16cintrin.h b/darwin-x86/lib64/clang/11.0.5/include/f16cintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/f16cintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/f16cintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/float.h b/darwin-x86/lib64/clang/11.0.5/include/float.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/float.h
rename to darwin-x86/lib64/clang/11.0.5/include/float.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/fma4intrin.h b/darwin-x86/lib64/clang/11.0.5/include/fma4intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/fma4intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/fma4intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/fmaintrin.h b/darwin-x86/lib64/clang/11.0.5/include/fmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/fmaintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/fmaintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h b/darwin-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h
new file mode 100644
index 0000000..83bcd01
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h
@@ -0,0 +1,387 @@
+//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// A single-header library providing a utility class to break up an array of
+// bytes. Whenever run on the same input, it provides the same output, as long
+// as its methods are called in the same order, with the same arguments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// In addition to the comments below, the API is also briefly documented at
+// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
+class FuzzedDataProvider {
+ public:
+  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
+  // provide more granular access. |data| must outlive the FuzzedDataProvider.
+  FuzzedDataProvider(const uint8_t *data, size_t size)
+      : data_ptr_(data), remaining_bytes_(size) {}
+  ~FuzzedDataProvider() = default;
+
+  // See the implementation below (after the class definition) for more verbose
+  // comments for each of the methods.
+
+  // Methods returning std::vector of bytes. These are the most popular choice
+  // when splitting fuzzing input into pieces, as every piece is put into a
+  // separate buffer (i.e. ASan would catch any under-/overflow) and the memory
+  // will be released automatically.
+  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes);
+  template <typename T>
+  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0);
+  template <typename T> std::vector<T> ConsumeRemainingBytes();
+
+  // Methods returning strings. Use only when you need a std::string or a null
+  // terminated C-string. Otherwise, prefer the methods returning std::vector.
+  std::string ConsumeBytesAsString(size_t num_bytes);
+  std::string ConsumeRandomLengthString(size_t max_length);
+  std::string ConsumeRandomLengthString();
+  std::string ConsumeRemainingBytesAsString();
+
+  // Methods returning integer values.
+  template <typename T> T ConsumeIntegral();
+  template <typename T> T ConsumeIntegralInRange(T min, T max);
+
+  // Methods returning floating point values.
+  template <typename T> T ConsumeFloatingPoint();
+  template <typename T> T ConsumeFloatingPointInRange(T min, T max);
+
+  // 0 <= return value <= 1.
+  template <typename T> T ConsumeProbability();
+
+  bool ConsumeBool();
+
+  // Returns a value chosen from the given enum.
+  template <typename T> T ConsumeEnum();
+
+  // Returns a value from the given array.
+  template <typename T, size_t size> T PickValueInArray(const T (&array)[size]);
+  template <typename T> T PickValueInArray(std::initializer_list<const T> list);
+
+  // Writes data to the given destination and returns number of bytes written.
+  size_t ConsumeData(void *destination, size_t num_bytes);
+
+  // Reports the remaining bytes available for fuzzed input.
+  size_t remaining_bytes() { return remaining_bytes_; }
+
+ private:
+  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
+  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
+
+  void CopyAndAdvance(void *destination, size_t num_bytes);
+
+  void Advance(size_t num_bytes);
+
+  template <typename T>
+  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes);
+
+  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value);
+
+  const uint8_t *data_ptr_;
+  size_t remaining_bytes_;
+};
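+
+// Usage sketch (illustrative): a typical libFuzzer-style target splits its
+// input with a FuzzedDataProvider. |TargetApi| below is a placeholder, not
+// part of this header:
+//
+//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+//     FuzzedDataProvider provider(data, size);
+//     int flags = provider.ConsumeIntegralInRange<int>(0, 7);
+//     std::string name = provider.ConsumeRandomLengthString(64);
+//     std::vector<uint8_t> rest = provider.ConsumeRemainingBytes<uint8_t>();
+//     TargetApi(flags, name, rest);
+//     return 0;
+//   }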
+
+// Returns a std::vector containing |num_bytes| of input data. If fewer than
+// |num_bytes| of data remain, returns a shorter std::vector containing all
+// of the data that's left. Can be used with any byte-sized type, such as
+// char, unsigned char, uint8_t, etc.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  return ConsumeBytes<T>(num_bytes, num_bytes);
+}
+
+// Similar to |ConsumeBytes|, but also appends the terminator value at the end
+// of the resulting vector. Useful when a mutable null-terminated C-string is
+// needed, for example; but that is a rare case, so prefer the |ConsumeBytes|
+// or |ConsumeBytesAsString| methods where possible.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes,
+                                                              T terminator) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
+  result.back() = terminator;
+  return result;
+}
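+
+// Usage sketch (illustrative): obtain a mutable, null-terminated buffer;
+// |MutateInPlace| is a placeholder, not part of this header:
+//
+//   std::vector<char> buf =
+//       provider.ConsumeBytesWithTerminator<char>(15, '\0');
+//   MutateInPlace(buf.data());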
+
+// Returns a std::vector containing all remaining bytes of the input data.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeRemainingBytes() {
+  return ConsumeBytes<T>(remaining_bytes_);
+}
+
+// Returns a std::string containing |num_bytes| of input data. Using this and
+// |.c_str()| on the resulting string is the best way to get an immutable
+// null-terminated C string. If fewer than |num_bytes| of data remain, returns
+// a shorter std::string containing all of the data that's left.
+inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) {
+  static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
+                "ConsumeBytesAsString cannot convert the data to a string.");
+
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::string result(
+      reinterpret_cast<const std::string::value_type *>(data_ptr_), num_bytes);
+  Advance(num_bytes);
+  return result;
+}
+
+// Returns a std::string of length from 0 to |max_length|. When it runs out of
+// input data, returns what remains of the input. Designed to be more stable
+// with respect to a fuzzer inserting characters than just picking a random
+// length and then consuming that many bytes with |ConsumeBytes|.
+inline std::string
+FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
+  // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
+  // followed by anything else to the end of the string. As a result of this
+  // logic, a fuzzer can insert characters into the string, and the string
+  // will be lengthened to include those new characters, resulting in a more
+  // stable fuzzer than picking the length of a string independently from
+  // picking its contents.
+  std::string result;
+
+  // Reserve the anticipated capacity to prevent several reallocations.
+  result.reserve(std::min(max_length, remaining_bytes_));
+  for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
+    char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+    Advance(1);
+    if (next == '\\' && remaining_bytes_ != 0) {
+      next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+      Advance(1);
+      if (next != '\\')
+        break;
+    }
+    result += next;
+  }
+
+  result.shrink_to_fit();
+  return result;
+}
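+
+// Worked example (illustrative): given the seven input bytes
+// 'a' '\' '\' 'b' '\' 'c' 'd', the "\\" pair collapses to one backslash and
+// the later "\c" terminates the string, so the result is the three
+// characters a, \, b, and 'd' remains for later consumers.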
+
+// Returns a std::string of length from 0 to |remaining_bytes_|.
+inline std::string FuzzedDataProvider::ConsumeRandomLengthString() {
+  return ConsumeRandomLengthString(remaining_bytes_);
+}
+
+// Returns a std::string containing all remaining bytes of the input data.
+// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
+// object.
+inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() {
+  return ConsumeBytesAsString(remaining_bytes_);
+}
+
+// Returns a number in the range [Type's min, Type's max]. The value might
+// not be uniformly distributed in the given range. If there's no input data
+// left, always returns |min|.
+template <typename T> T FuzzedDataProvider::ConsumeIntegral() {
+  return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
+                                std::numeric_limits<T>::max());
+}
+
+// Returns a number in the range [min, max] by consuming bytes from the
+// input data. The value might not be uniformly distributed in the given
+// range. If there's no input data left, always returns |min|. |min| must
+// be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) {
+  static_assert(std::is_integral<T>::value, "An integral type is required.");
+  static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
+
+  if (min > max)
+    abort();
+
+  // Use the biggest type possible to hold the range and the result.
+  uint64_t range = static_cast<uint64_t>(max) - min;
+  uint64_t result = 0;
+  size_t offset = 0;
+
+  while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
+         remaining_bytes_ != 0) {
+    // Pull bytes off the end of the seed data. Experimentally, this seems to
+    // allow the fuzzer to more easily explore the input space. This makes
+    // sense, since it works by modifying inputs that caused new code to run,
+    // and this data is often used to encode the length of data read by
+    // |ConsumeBytes|. Separating out read lengths makes it easier to modify
+    // the contents of the data that is actually read.
+    --remaining_bytes_;
+    result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
+    offset += CHAR_BIT;
+  }
+
+  // Avoid division by 0, in case |range + 1| results in overflow.
+  if (range != std::numeric_limits<decltype(range)>::max())
+    result = result % (range + 1);
+
+  return static_cast<T>(min + result);
+}
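+
+// Worked example (illustrative): ConsumeIntegralInRange<uint8_t>(10, 13) has
+// range == 3, so exactly one byte is taken from the *end* of the input; if
+// that byte is 42, the result is 10 + (42 % 4) == 12.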
+
+// Returns a floating point value in the range [Type's lowest, Type's max] by
+// consuming bytes from the input data. If there's no input data left, always
+// returns approximately 0.
+template <typename T> T FuzzedDataProvider::ConsumeFloatingPoint() {
+  return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
+                                        std::numeric_limits<T>::max());
+}
+
+// Returns a floating point value in the given range by consuming bytes from
+// the input data. If there's no input data left, returns |min|. Note that
+// |min| must be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) {
+  if (min > max)
+    abort();
+
+  T range = .0;
+  T result = min;
+  constexpr T zero(.0);
+  if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
+    // The diff |max - min| would overflow the given floating point type. Use
+    // half of the diff as the range and consume a bool to decide whether the
+    // result is in the first or the second part of the diff.
+    range = (max / 2.0) - (min / 2.0);
+    if (ConsumeBool()) {
+      result += range;
+    }
+  } else {
+    range = max - min;
+  }
+
+  return result + range * ConsumeProbability<T>();
+}
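+
+// Worked example (illustrative): for ConsumeFloatingPointInRange<double>
+// called with (-DBL_MAX, DBL_MAX), |max - min| would overflow, so DBL_MAX
+// becomes the range and a consumed bool selects the lower or upper half
+// before scaling by a probability in [0, 1].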
+
+// Returns a floating point number in the range [0.0, 1.0]. If there's no
+// input data left, always returns 0.
+template <typename T> T FuzzedDataProvider::ConsumeProbability() {
+  static_assert(std::is_floating_point<T>::value,
+                "A floating point type is required.");
+
+  // Use different integral types for different floating point types in order
+  // to provide better density of the resulting values.
+  using IntegralType =
+      typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
+                                uint64_t>::type;
+
+  T result = static_cast<T>(ConsumeIntegral<IntegralType>());
+  result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
+  return result;
+}
+
+// Reads one byte and returns a bool, or false when no data remains.
+inline bool FuzzedDataProvider::ConsumeBool() {
+  return 1 & ConsumeIntegral<uint8_t>();
+}
+
+// Returns an enum value. The enum must start at 0 and be contiguous. It must
+// also contain |kMaxValue| aliased to its largest (inclusive) value, e.g.:
+// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
+template <typename T> T FuzzedDataProvider::ConsumeEnum() {
+  static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
+  return static_cast<T>(
+      ConsumeIntegralInRange<uint32_t>(0, static_cast<uint32_t>(T::kMaxValue)));
+}
+
+// Returns a copy of the value selected from the given fixed-size |array|.
+template <typename T, size_t size>
+T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) {
+  static_assert(size > 0, "The array must be non-empty.");
+  return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+}
+
+template <typename T>
+T FuzzedDataProvider::PickValueInArray(std::initializer_list<const T> list) {
+  // TODO(Dor1s): switch to static_assert once C++14 is allowed.
+  if (!list.size())
+    abort();
+
+  return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
+}
+
+// Writes |num_bytes| of input data to the given destination pointer. If there
+// is not enough data left, writes all remaining bytes. Return value is the
+// number of bytes written.
+// In general, it's better to avoid using this function, but it may be useful
+// in cases when it's necessary to fill a certain buffer or object with
+// fuzzing data.
+inline size_t FuzzedDataProvider::ConsumeData(void *destination,
+                                              size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  CopyAndAdvance(destination, num_bytes);
+  return num_bytes;
+}
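+
+// Usage sketch (illustrative): fill a fixed-size object with fuzz bytes;
+// |Header| is a placeholder type, not part of this header:
+//
+//   Header h{};
+//   size_t n = provider.ConsumeData(&h, sizeof(h));  // n <= sizeof(h)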
+
+// Private methods.
+inline void FuzzedDataProvider::CopyAndAdvance(void *destination,
+                                               size_t num_bytes) {
+  std::memcpy(destination, data_ptr_, num_bytes);
+  Advance(num_bytes);
+}
+
+inline void FuzzedDataProvider::Advance(size_t num_bytes) {
+  if (num_bytes > remaining_bytes_)
+    abort();
+
+  data_ptr_ += num_bytes;
+  remaining_bytes_ -= num_bytes;
+}
+
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) {
+  static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
+
+  // The point of using the size-based constructor below is to increase the
+  // odds of having a vector object with capacity being equal to the length.
+  // That part is always implementation specific, but at least both libc++ and
+  // libstdc++ allocate the requested number of bytes in that constructor,
+  // which seems to be a natural choice for other implementations as well.
+  // To increase the odds even more, we also call |shrink_to_fit| below.
+  std::vector<T> result(size);
+  if (size == 0) {
+    if (num_bytes != 0)
+      abort();
+    return result;
+  }
+
+  CopyAndAdvance(result.data(), num_bytes);
+
+  // Even though |shrink_to_fit| is also implementation specific, we expect it
+  // to provide an additional assurance in case vector's constructor allocated
+  // a buffer which is larger than the actual amount of data we put inside it.
+  result.shrink_to_fit();
+  return result;
+}
+
+template <typename TS, typename TU>
+TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) {
+  static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
+  static_assert(!std::numeric_limits<TU>::is_signed,
+                "Source type must be unsigned.");
+
+  // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
+  if (std::numeric_limits<TS>::is_modulo)
+    return static_cast<TS>(value);
+
+  // Avoid using implementation-defined unsigned to signed conversions.
+  // To learn more, see https://stackoverflow.com/questions/13150449.
+  if (value <= std::numeric_limits<TS>::max()) {
+    return static_cast<TS>(value);
+  } else {
+    constexpr auto TS_min = std::numeric_limits<TS>::min();
+    return TS_min + static_cast<TS>(value - TS_min);
+  }
+}
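+
+// Worked example (illustrative): ConvertUnsignedToSigned<int8_t>(uint8_t{200})
+// takes the second branch and yields INT8_MIN + (200 - 128) == -56, i.e. the
+// two's-complement reinterpretation of 200, without relying on
+// implementation-defined conversions.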
+
+#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
diff --git a/darwin-x86/lib64/clang/11.0.1/include/fxsrintrin.h b/darwin-x86/lib64/clang/11.0.5/include/fxsrintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/fxsrintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/fxsrintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/gfniintrin.h b/darwin-x86/lib64/clang/11.0.5/include/gfniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/gfniintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/gfniintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/htmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/htmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/htmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/htmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/htmxlintrin.h b/darwin-x86/lib64/clang/11.0.5/include/htmxlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/htmxlintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ia32intrin.h b/darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ia32intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/immintrin.h b/darwin-x86/lib64/clang/11.0.5/include/immintrin.h
similarity index 61%
rename from darwin-x86/lib64/clang/11.0.1/include/immintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/immintrin.h
index edf8c42..e9dff23 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/immintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/immintrin.h
@@ -10,198 +10,231 @@
 #ifndef __IMMINTRIN_H
 #define __IMMINTRIN_H
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MMX__)
 #include <mmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE__)
 #include <xmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE2__)
 #include <emmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE3__)
 #include <pmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSSE3__)
 #include <tmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__SSE4_2__) || defined(__SSE4_1__))
 #include <smmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AES__) || defined(__PCLMUL__))
 #include <wmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLFLUSHOPT__)
 #include <clflushoptintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLWB__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLWB__)
 #include <clwbintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX__)
 #include <avxintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX2__)
 #include <avx2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__F16C__)
 #include <f16cintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__VPCLMULQDQ__)
 #include <vpclmulqdqintrin.h>
 #endif
 
 /* No feature check desired due to internal checks */
 #include <bmiintrin.h>
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__BMI2__)
 #include <bmi2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__LZCNT__)
 #include <lzcntintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__POPCNT__)
 #include <popcntintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FMA__)
 #include <fmaintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512F__)
 #include <avx512fintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VL__)
 #include <avx512vlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512BW__)
 #include <avx512bwintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BITALG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512BITALG__)
 #include <avx512bitalgintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512CD__)
 #include <avx512cdintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VPOPCNTDQ__)
 #include <avx512vpopcntdqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
 #include <avx512vpopcntdqvlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VNNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VNNI__)
 #include <avx512vnniintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512VNNI__))
 #include <avx512vlvnniintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512DQ__)
 #include <avx512dqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512BITALG__))
 #include <avx512vlbitalgintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512BW__))
 #include <avx512vlbwintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512CD__))
 #include <avx512vlcdintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512DQ__))
 #include <avx512vldqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512ER__)
 #include <avx512erintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512IFMA__)
 #include <avx512ifmaintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512IFMA__) && defined(__AVX512VL__))
 #include <avx512ifmavlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VBMI__)
 #include <avx512vbmiintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VBMI__) && defined(__AVX512VL__))
 #include <avx512vbmivlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VBMI2__)
 #include <avx512vbmi2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
 #include <avx512vlvbmi2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512PF__)
 #include <avx512pfintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512BF16__)
 #include <avx512bf16intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512BF16__))
 #include <avx512vlbf16intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PKU__)
 #include <pkuintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VAES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__VAES__)
 #include <vaesintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__GFNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__GFNI__)
 #include <gfniintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDPID__)
 /// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
 ///
 /// \headerfile <immintrin.h>
@@ -213,7 +246,8 @@
 }
 #endif // __RDPID__
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDRND__)
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand16_step(unsigned short *__p)
 {
@@ -235,7 +269,8 @@
 #endif
 #endif /* __RDRND__ */
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FSGSBASE__)
 #ifdef __x86_64__
 static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
 _readfsbase_u32(void)
@@ -288,7 +323,8 @@
 #endif
 #endif /* __FSGSBASE__ */
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MOVBE__)
 
 /* The structs used below are to force the load/store to be unaligned. This
  * is accomplished with the __packed__ attribute. The __may_alias__ prevents
@@ -347,35 +383,42 @@
 #endif
 #endif /* __MOVBE */
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RTM__)
 #include <rtmintrin.h>
 #include <xtestintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SHA__)
 #include <shaintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FXSR__)
 #include <fxsrintrin.h>
 #endif
 
 /* No feature check desired due to internal MSC_VER checks */
 #include <xsaveintrin.h>
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XSAVEOPT__)
 #include <xsaveoptintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XSAVEC__)
 #include <xsavecintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XSAVES__)
 #include <xsavesintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHSTK__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SHSTK__)
 #include <cetintrin.h>
 #endif
 
@@ -383,57 +426,81 @@
  * whereas others are also available at all times. */
 #include <adxintrin.h>
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDSEED__)
 #include <rdseedintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__WBNOINVD__)
 #include <wbnoinvdintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLDEMOTE__)
 #include <cldemoteintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__WAITPKG__)
 #include <waitpkgintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MOVDIRI__) || defined(__MOVDIR64B__)
 #include <movdirintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PCONFIG__)
 #include <pconfigintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SGX__)
 #include <sgxintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PTWRITE__)
 #include <ptwriteintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__INVPCID__)
 #include <invpcidintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__AVX512VP2INTERSECT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+#include <amxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VP2INTERSECT__)
 #include <avx512vp2intersectintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
 #include <avx512vlvp2intersectintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__ENQCMD__)
 #include <enqcmdintrin.h>
 #endif
 
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SERIALIZE__)
+#include <serializeintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__TSXLDTRK__)
+#include <tsxldtrkintrin.h>
+#endif
+
 #if defined(_MSC_VER) && __has_extension(gnu_asm)
 /* Define the default attributes for these intrinsics */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
diff --git a/darwin-x86/lib64/clang/11.0.1/include/intrin.h b/darwin-x86/lib64/clang/11.0.5/include/intrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/intrin.h
index f85f7a2..871b47c 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/intrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/intrin.h
@@ -289,6 +289,9 @@
 static __inline__
 unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
 
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__
 __int64 _InterlockedDecrement64(__int64 volatile *_Addend);
 static __inline__
diff --git a/darwin-x86/lib64/clang/11.0.1/include/inttypes.h b/darwin-x86/lib64/clang/11.0.5/include/inttypes.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/inttypes.h
rename to darwin-x86/lib64/clang/11.0.5/include/inttypes.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/invpcidintrin.h b/darwin-x86/lib64/clang/11.0.5/include/invpcidintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/invpcidintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/invpcidintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/iso646.h b/darwin-x86/lib64/clang/11.0.5/include/iso646.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/iso646.h
rename to darwin-x86/lib64/clang/11.0.5/include/iso646.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/limits.h b/darwin-x86/lib64/clang/11.0.5/include/limits.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/limits.h
rename to darwin-x86/lib64/clang/11.0.5/include/limits.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/lwpintrin.h b/darwin-x86/lib64/clang/11.0.5/include/lwpintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/lwpintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/lwpintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/lzcntintrin.h b/darwin-x86/lib64/clang/11.0.5/include/lzcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/lzcntintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/lzcntintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/mm3dnow.h b/darwin-x86/lib64/clang/11.0.5/include/mm3dnow.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/mm3dnow.h
rename to darwin-x86/lib64/clang/11.0.5/include/mm3dnow.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/mm_malloc.h b/darwin-x86/lib64/clang/11.0.5/include/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/mm_malloc.h
rename to darwin-x86/lib64/clang/11.0.5/include/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/mmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/mmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/mmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/module.modulemap b/darwin-x86/lib64/clang/11.0.5/include/module.modulemap
similarity index 97%
rename from darwin-x86/lib64/clang/11.0.1/include/module.modulemap
rename to darwin-x86/lib64/clang/11.0.5/include/module.modulemap
index 7954a77..6894672 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/module.modulemap
+++ b/darwin-x86/lib64/clang/11.0.5/include/module.modulemap
@@ -27,6 +27,12 @@
       header "arm_fp16.h"
       export *
     }
+
+    explicit module sve {
+      requires sve
+      header "arm_sve.h"
+      export *
+    }
   }
 
   explicit module intel {
diff --git a/darwin-x86/lib64/clang/11.0.1/include/movdirintrin.h b/darwin-x86/lib64/clang/11.0.5/include/movdirintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/movdirintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/movdirintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/msa.h b/darwin-x86/lib64/clang/11.0.5/include/msa.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/msa.h
rename to darwin-x86/lib64/clang/11.0.5/include/msa.h
index 19ea607..0ca4900 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/msa.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/msa.h
@@ -212,10 +212,14 @@
 #define __msa_ld_h __builtin_msa_ld_h
 #define __msa_ld_w __builtin_msa_ld_w
 #define __msa_ld_d __builtin_msa_ld_d
+#define __msa_ldr_d __builtin_msa_ldr_d
+#define __msa_ldr_w __builtin_msa_ldrq_w
 #define __msa_st_b __builtin_msa_st_b
 #define __msa_st_h __builtin_msa_st_h
 #define __msa_st_w __builtin_msa_st_w
 #define __msa_st_d __builtin_msa_st_d
+#define __msa_str_d __builtin_msa_str_d
+#define __msa_str_w __builtin_msa_strq_w
 #define __msa_sat_s_b __builtin_msa_sat_s_b
 #define __msa_sat_s_h __builtin_msa_sat_s_h
 #define __msa_sat_s_w __builtin_msa_sat_s_w
diff --git a/darwin-x86/lib64/clang/11.0.1/include/mwaitxintrin.h b/darwin-x86/lib64/clang/11.0.5/include/mwaitxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/mwaitxintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/mwaitxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/nmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/nmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/nmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/nmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/omp-tools.h b/darwin-x86/lib64/clang/11.0.5/include/omp-tools.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/omp-tools.h
rename to darwin-x86/lib64/clang/11.0.5/include/omp-tools.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/omp.h b/darwin-x86/lib64/clang/11.0.5/include/omp.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.1/include/omp.h
rename to darwin-x86/lib64/clang/11.0.5/include/omp.h
index 0581ab4..e2a62ab 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/omp.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/omp.h
@@ -355,6 +355,9 @@
 
     extern int __KAI_KMPC_CONVENTION omp_get_supported_active_levels(void);
 
+    /* OpenMP 5.1 Display Environment */
+    extern void omp_display_env(int verbose);
+
 #   undef __KAI_KMPC_CONVENTION
 #   undef __KMP_IMP
 
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ompt.h b/darwin-x86/lib64/clang/11.0.5/include/ompt.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ompt.h
rename to darwin-x86/lib64/clang/11.0.5/include/ompt.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/opencl-c-base.h b/darwin-x86/lib64/clang/11.0.5/include/opencl-c-base.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/opencl-c-base.h
rename to darwin-x86/lib64/clang/11.0.5/include/opencl-c-base.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/opencl-c.h b/darwin-x86/lib64/clang/11.0.5/include/opencl-c.h
similarity index 95%
rename from darwin-x86/lib64/clang/11.0.1/include/opencl-c.h
rename to darwin-x86/lib64/clang/11.0.5/include/opencl-c.h
index 3210f93..66e18bd 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/opencl-c.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/opencl-c.h
@@ -13432,18 +13432,12 @@
 uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
 uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
 uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
 int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
 int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
 int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
 uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
 uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
 uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
 
 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
 long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
@@ -13482,18 +13476,12 @@
 ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
 ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
 ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
 long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
 long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
 long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
 ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
 ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
 ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
 #endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
 
 // OpenCL v2.0 s6.13.11.7.5:
@@ -15472,6 +15460,674 @@
 
 #endif //cl_khr_subgroups cl_intel_subgroups
 
+#if defined(cl_khr_subgroup_extended_types)
+char __ovld __conv sub_group_broadcast( char value, uint index );
+char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
+char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
+char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
+char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
+char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
+
+uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
+uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
+uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
+uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
+uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
+uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
+
+short __ovld __conv sub_group_broadcast( short value, uint index );
+short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
+short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
+short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
+short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
+short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
+
+ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
+ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
+ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
+ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
+ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
+ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
+
+// scalar int broadcast is part of cl_khr_subgroups
+int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
+int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
+int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
+int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
+int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
+
+// scalar uint broadcast is part of cl_khr_subgroups
+uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
+uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
+uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
+uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
+uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
+
+// scalar long broadcast is part of cl_khr_subgroups
+long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
+long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
+long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
+long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
+long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
+
+// scalar ulong broadcast is part of cl_khr_subgroups
+ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
+ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
+ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
+ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
+ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
+
+// scalar float broadcast is part of cl_khr_subgroups
+float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
+float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
+float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
+float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
+float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
+
+char __ovld __conv sub_group_reduce_add( char value );
+uchar __ovld __conv sub_group_reduce_add( uchar value );
+short __ovld __conv sub_group_reduce_add( short value );
+ushort __ovld __conv sub_group_reduce_add( ushort value );
+
+char __ovld __conv sub_group_reduce_min( char value );
+uchar __ovld __conv sub_group_reduce_min( uchar value );
+short __ovld __conv sub_group_reduce_min( short value );
+ushort __ovld __conv sub_group_reduce_min( ushort value );
+
+char __ovld __conv sub_group_reduce_max( char value );
+uchar __ovld __conv sub_group_reduce_max( uchar value );
+short __ovld __conv sub_group_reduce_max( short value );
+ushort __ovld __conv sub_group_reduce_max( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_add( char value );
+uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
+short __ovld __conv sub_group_scan_inclusive_add( short value );
+ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_min( char value );
+uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
+short __ovld __conv sub_group_scan_inclusive_min( short value );
+ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_max( char value );
+uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
+short __ovld __conv sub_group_scan_inclusive_max( short value );
+ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_add( char value );
+uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
+short __ovld __conv sub_group_scan_exclusive_add( short value );
+ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_min( char value );
+uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
+short __ovld __conv sub_group_scan_exclusive_min( short value );
+ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_max( char value );
+uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
+short __ovld __conv sub_group_scan_exclusive_max( short value );
+ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
+
+#if defined(cl_khr_fp16)
+// scalar half broadcast is part of cl_khr_subgroups
+half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
+half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
+half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
+half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
+half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
+#endif  // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+// scalar double broadcast is part of cl_khr_subgroups
+double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
+double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
+double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
+double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
+double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
+#endif  // cl_khr_fp64
+
+#endif  // cl_khr_subgroup_extended_types
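+
+// Illustrative sketch (not part of this header), using a hypothetical buffer
+// `buf`: with cl_khr_subgroup_extended_types the broadcasts above also accept
+// small vector types, e.g.
+//
+//   kernel void bcast(global short4 *buf) {
+//     short4 v = buf[get_global_id(0)];
+//     buf[get_global_id(0)] = sub_group_broadcast(v, 0);  // lane 0's value
+//   }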
+
+#if defined(cl_khr_subgroup_non_uniform_vote)
+int     __ovld sub_group_elect(void);
+int     __ovld sub_group_non_uniform_all( int predicate );
+int     __ovld sub_group_non_uniform_any( int predicate );
+
+int     __ovld sub_group_non_uniform_all_equal( char value );
+int     __ovld sub_group_non_uniform_all_equal( uchar value );
+int     __ovld sub_group_non_uniform_all_equal( short value );
+int     __ovld sub_group_non_uniform_all_equal( ushort value );
+int     __ovld sub_group_non_uniform_all_equal( int value );
+int     __ovld sub_group_non_uniform_all_equal( uint value );
+int     __ovld sub_group_non_uniform_all_equal( long value );
+int     __ovld sub_group_non_uniform_all_equal( ulong value );
+int     __ovld sub_group_non_uniform_all_equal( float value );
+
+#if defined(cl_khr_fp16)
+int     __ovld sub_group_non_uniform_all_equal( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+int     __ovld sub_group_non_uniform_all_equal( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_vote
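+
+// Illustrative sketch (not part of this header), with hypothetical `value`,
+// `threshold`, and `counter`: sub_group_elect() returns true for exactly one
+// of the work-items that reach it, so a leader can be chosen even under
+// divergent control flow, e.g.
+//
+//   if (value > threshold) {        // only some lanes enter
+//     if (sub_group_elect())        // one active lane is elected
+//       atomic_inc(counter);        // the leader does the shared update
+//   }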
+
+#if defined(cl_khr_subgroup_ballot)
+char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
+char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
+char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
+char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
+char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
+char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
+
+uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
+uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
+uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
+uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
+uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
+uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
+
+short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
+short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
+short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
+short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
+short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
+short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
+
+ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
+ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
+ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
+ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
+ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
+ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
+
+int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
+int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
+int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
+int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
+int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
+int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
+
+uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
+uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
+uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
+uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
+uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
+uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
+
+long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
+long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
+long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
+long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
+long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
+long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
+
+ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
+ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
+ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
+ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
+ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
+ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
+
+float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
+float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
+float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
+float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
+float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
+float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
+
+char    __ovld sub_group_broadcast_first( char value );
+uchar   __ovld sub_group_broadcast_first( uchar value );
+short   __ovld sub_group_broadcast_first( short value );
+ushort  __ovld sub_group_broadcast_first( ushort value );
+int     __ovld sub_group_broadcast_first( int value );
+uint    __ovld sub_group_broadcast_first( uint value );
+long    __ovld sub_group_broadcast_first( long value );
+ulong   __ovld sub_group_broadcast_first( ulong value );
+float   __ovld sub_group_broadcast_first( float value );
+
+uint4   __ovld sub_group_ballot( int predicate );
+int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
+int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
+uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
+
+uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_find_lsb( uint4 value );
+uint    __ovld sub_group_ballot_find_msb( uint4 value );
+
+uint4   __ovld __cnfn get_sub_group_eq_mask(void);
+uint4   __ovld __cnfn get_sub_group_ge_mask(void);
+uint4   __ovld __cnfn get_sub_group_gt_mask(void);
+uint4   __ovld __cnfn get_sub_group_le_mask(void);
+uint4   __ovld __cnfn get_sub_group_lt_mask(void);
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
+half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
+half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
+half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
+half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
+half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
+
+half    __ovld sub_group_broadcast_first( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
+double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
+double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
+double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
+double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
+double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
+
+double   __ovld sub_group_broadcast_first( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_ballot
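+
+// Illustrative sketch (not part of this header), with a hypothetical `value`:
+// sub_group_ballot() packs one bit per sub-group work-item into a uint4 mask
+// that the query functions above can inspect, e.g.
+//
+//   uint4 mask = sub_group_ballot(value > 0);
+//   uint positives = sub_group_ballot_bit_count(mask);  // lanes with value > 0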
+
+#if defined(cl_khr_subgroup_non_uniform_arithmetic)
+char    __ovld sub_group_non_uniform_reduce_add( char value );
+uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
+short   __ovld sub_group_non_uniform_reduce_add( short value );
+ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
+int     __ovld sub_group_non_uniform_reduce_add( int value );
+uint    __ovld sub_group_non_uniform_reduce_add( uint value );
+long    __ovld sub_group_non_uniform_reduce_add( long value );
+ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
+float   __ovld sub_group_non_uniform_reduce_add( float value );
+
+char    __ovld sub_group_non_uniform_reduce_mul( char value );
+uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
+short   __ovld sub_group_non_uniform_reduce_mul( short value );
+ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
+int     __ovld sub_group_non_uniform_reduce_mul( int value );
+uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
+long    __ovld sub_group_non_uniform_reduce_mul( long value );
+ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
+float   __ovld sub_group_non_uniform_reduce_mul( float value );
+
+char    __ovld sub_group_non_uniform_reduce_min( char value );
+uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
+short   __ovld sub_group_non_uniform_reduce_min( short value );
+ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
+int     __ovld sub_group_non_uniform_reduce_min( int value );
+uint    __ovld sub_group_non_uniform_reduce_min( uint value );
+long    __ovld sub_group_non_uniform_reduce_min( long value );
+ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
+float   __ovld sub_group_non_uniform_reduce_min( float value );
+
+char    __ovld sub_group_non_uniform_reduce_max( char value );
+uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
+short   __ovld sub_group_non_uniform_reduce_max( short value );
+ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
+int     __ovld sub_group_non_uniform_reduce_max( int value );
+uint    __ovld sub_group_non_uniform_reduce_max( uint value );
+long    __ovld sub_group_non_uniform_reduce_max( long value );
+ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
+float   __ovld sub_group_non_uniform_reduce_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_reduce_and( char value );
+uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
+short   __ovld sub_group_non_uniform_reduce_and( short value );
+ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
+int     __ovld sub_group_non_uniform_reduce_and( int value );
+uint    __ovld sub_group_non_uniform_reduce_and( uint value );
+long    __ovld sub_group_non_uniform_reduce_and( long value );
+ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_or( char value );
+uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
+short   __ovld sub_group_non_uniform_reduce_or( short value );
+ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
+int     __ovld sub_group_non_uniform_reduce_or( int value );
+uint    __ovld sub_group_non_uniform_reduce_or( uint value );
+long    __ovld sub_group_non_uniform_reduce_or( long value );
+ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_xor( char value );
+uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
+short   __ovld sub_group_non_uniform_reduce_xor( short value );
+ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
+int     __ovld sub_group_non_uniform_reduce_xor( int value );
+uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
+long    __ovld sub_group_non_uniform_reduce_xor( long value );
+ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
+
+int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_reduce_add( half value );
+half    __ovld sub_group_non_uniform_reduce_mul( half value );
+half    __ovld sub_group_non_uniform_reduce_min( half value );
+half    __ovld sub_group_non_uniform_reduce_max( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_non_uniform_reduce_add( double value );
+double  __ovld sub_group_non_uniform_reduce_mul( double value );
+double  __ovld sub_group_non_uniform_reduce_min( double value );
+double  __ovld sub_group_non_uniform_reduce_max( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_arithmetic
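+
+// Illustrative sketch (not part of this header), with hypothetical `value` and
+// `even_sum`: unlike the uniform sub_group_reduce_* built-ins, these variants
+// may be called from divergent control flow; only the active work-items
+// participate, e.g.
+//
+//   if (get_sub_group_local_id() % 2 == 0)   // even lanes only
+//     even_sum = sub_group_non_uniform_reduce_add(value);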
+
+#if defined(cl_khr_subgroup_shuffle)
+char    __ovld sub_group_shuffle( char value, uint index );
+uchar   __ovld sub_group_shuffle( uchar value, uint index );
+short   __ovld sub_group_shuffle( short value, uint index );
+ushort  __ovld sub_group_shuffle( ushort value, uint index );
+int     __ovld sub_group_shuffle( int value, uint index );
+uint    __ovld sub_group_shuffle( uint value, uint index );
+long    __ovld sub_group_shuffle( long value, uint index );
+ulong   __ovld sub_group_shuffle( ulong value, uint index );
+float   __ovld sub_group_shuffle( float value, uint index );
+
+char    __ovld sub_group_shuffle_xor( char value, uint mask );
+uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
+short   __ovld sub_group_shuffle_xor( short value, uint mask );
+ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
+int     __ovld sub_group_shuffle_xor( int value, uint mask );
+uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
+long    __ovld sub_group_shuffle_xor( long value, uint mask );
+ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
+float   __ovld sub_group_shuffle_xor( float value, uint mask );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle( half value, uint index );
+half    __ovld sub_group_shuffle_xor( half value, uint mask );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle( double value, uint index );
+double  __ovld sub_group_shuffle_xor( double value, uint mask );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle
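+
+// Illustrative sketch (not part of this header), assuming all lanes are active
+// and the sub-group size is a power of two: sub_group_shuffle_xor() enables a
+// butterfly reduction without local memory, e.g.
+//
+//   for (uint m = get_sub_group_size() / 2; m > 0; m >>= 1)
+//     value += sub_group_shuffle_xor(value, m);
+//   // every lane now holds the sub-group sum of `value`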
+
+#if defined(cl_khr_subgroup_shuffle_relative)
+char    __ovld sub_group_shuffle_up( char value, uint delta );
+uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
+short   __ovld sub_group_shuffle_up( short value, uint delta );
+ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
+int     __ovld sub_group_shuffle_up( int value, uint delta );
+uint    __ovld sub_group_shuffle_up( uint value, uint delta );
+long    __ovld sub_group_shuffle_up( long value, uint delta );
+ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
+float   __ovld sub_group_shuffle_up( float value, uint delta );
+
+char    __ovld sub_group_shuffle_down( char value, uint delta );
+uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
+short   __ovld sub_group_shuffle_down( short value, uint delta );
+ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
+int     __ovld sub_group_shuffle_down( int value, uint delta );
+uint    __ovld sub_group_shuffle_down( uint value, uint delta );
+long    __ovld sub_group_shuffle_down( long value, uint delta );
+ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
+float   __ovld sub_group_shuffle_down( float value, uint delta );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle_up( half value, uint delta );
+half    __ovld sub_group_shuffle_down( half value, uint delta );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle_up( double value, uint delta );
+double  __ovld sub_group_shuffle_down( double value, uint delta );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle_relative
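+
+// Illustrative sketch (not part of this header), with a hypothetical float
+// `value`: the relative shuffles read a neighbouring lane, e.g. a forward
+// difference (shuffling past lane 0 is undefined, hence the guard):
+//
+//   float prev = sub_group_shuffle_up(value, 1);  // lane i reads lane i-1
+//   float diff = (get_sub_group_local_id() == 0) ? 0.0f : value - prev;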
+
+#if defined(cl_khr_subgroup_clustered_reduce)
+char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
+
+int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_clustered_reduce
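+
+// Illustrative sketch (not part of this header): clustered reductions reduce
+// over groups of `clustersize` adjacent lanes, where clustersize must be a
+// power of two, e.g. independent sums over each group of 4 lanes:
+//
+//   float quad_sum = sub_group_clustered_reduce_add(value, 4);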
+
 #if defined(cl_intel_subgroups)
 // Intel-Specific Sub Group Functions
 float   __ovld __conv intel_sub_group_shuffle( float  x, uint c );
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
new file mode 100644
index 0000000..406c974
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
@@ -0,0 +1,42 @@
+/*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+
+/// Include declarations for libdevice functions.
+#include <__clang_cuda_libdevice_declares.h>
+
+/// Provide definitions for these functions.
+#include <__clang_cuda_device_functions.h>
+
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#pragma omp end declare variant
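+
+/* Illustrative sketch (not part of this header): within an OpenMP target
+ * region compiled for nvptx, an ordinary libm call now resolves to the CUDA
+ * device implementation pulled in above, e.g.
+ *
+ *   #pragma omp target map(tofrom : x)
+ *   x = sin(x);   // dispatched to the device-side sin()
+ */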
+
+#endif
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
new file mode 100644
index 0000000..bd6011e
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
@@ -0,0 +1,75 @@
+/*===------------------ cmath - OpenMP <cmath> overlay ---------------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_CMATH_H__
+#define __CLANG_OPENMP_CMATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <cmath>
+
+// Make sure we include our math.h overlay; it probably happened already, but
+// we need to be sure.
+#include <math.h>
+
+// We (might) need cstdlib because __clang_cuda_cmath.h below declares `abs`
+// which might live in cstdlib.
+#include <cstdlib>
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_cmath.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+// Overloads not provided by the CUDA wrappers but by the CUDA system headers.
+// Since we do not include the latter, we define them ourselves.
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
+__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float log2(float __x) { return ::log2f(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
+__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+  return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+  return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbln(float __x, long int __y) {
+  return ::scalblnf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
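+
+/* Illustrative sketch (not part of this header): on an nvptx device, a C++
+ * target region can use the float overloads defined above directly, e.g.
+ *
+ *   #pragma omp target map(tofrom : h)
+ *   h = hypot(3.0f, 4.0f);   // float overload above; yields 5.0f
+ */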
+
+#endif
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
new file mode 100644
index 0000000..1ed0b14
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
@@ -0,0 +1,25 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require the std:: math functions used by the complex builtins below.
+#include <cmath>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex>
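+
+// Illustrative sketch (not part of this header): with this wrapper in place,
+// complex arithmetic such as
+//
+//   std::complex<double> z = a * b;   // inside an OpenMP target region
+//
+// can rely on the helper functions supplied by the builtins header included
+// above instead of requiring a host runtime library on the device.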
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h
new file mode 100644
index 0000000..829c7a7
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h
@@ -0,0 +1,25 @@
+/*===-- complex.h -- OpenMP complex wrapper for target regions --------- c -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX_H__
+#define __CLANG_OPENMP_COMPLEX_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require the math.h functions used by the complex builtins below.
+#include <math.h>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex.h>
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h
new file mode 100644
index 0000000..c64af8b
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h
@@ -0,0 +1,51 @@
+/*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+// If we are in C++ mode and include <math.h> (not <cmath>) first, we still
+// need to make sure <cmath> is read first. The problem otherwise is that we
+// haven't seen the declarations of the math.h functions when the system
+// math.h includes our cmath overlay. However, our cmath overlay, or rather
+// the underlying overlay, e.g. CUDA, uses the math.h functions. Since we
+// haven't declared them yet, we get errors. CUDA avoids this by eagerly
+// declaring all math functions (in the __device__ space), but we cannot do
+// this. Instead we break the dependence by forcing cmath to go first. While
+// our cmath will in turn include this file, the cmath guards will prevent
+// recursion.
+#ifdef __cplusplus
+#include <cmath>
+#endif
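+
+/* Illustrative sketch (not part of this header) of the resulting include
+ * order in a C++ translation unit:
+ *
+ *   #include <math.h>   // enters this wrapper, which forces <cmath> in first;
+ *                       // the guarded section below then reads the real math.h
+ *
+ * so the CUDA-based overlays always see the libm declarations they rely on.
+ */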
+
+#ifndef __CLANG_OPENMP_MATH_H__
+#define __CLANG_OPENMP_MATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <math.h>
+
+// We need limits.h for __clang_cuda_math.h below; since including it eagerly
+// here should not hurt, we do so.
+#include <limits.h>
+
+// We need stdlib.h because (for now) __clang_cuda_math.h below declares
+// `abs`, which should live in stdlib.h.
+#include <stdlib.h>
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_math.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#pragma omp end declare variant
+
+#endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/new b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/openmp_wrappers/new
rename to darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/new
diff --git a/darwin-x86/lib64/clang/11.0.1/include/pconfigintrin.h b/darwin-x86/lib64/clang/11.0.5/include/pconfigintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/pconfigintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/pconfigintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/pkuintrin.h b/darwin-x86/lib64/clang/11.0.5/include/pkuintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/pkuintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/pkuintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/pmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/pmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/popcntintrin.h b/darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/popcntintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/emmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/emmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/mm_malloc.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/mm_malloc.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/mmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/mmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/pmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/pmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/smmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/smmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/tmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/tmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/xmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ppc_wrappers/xmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/prfchwintrin.h b/darwin-x86/lib64/clang/11.0.5/include/prfchwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/prfchwintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc b/darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
similarity index 94%
rename from darwin-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc
rename to darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
index 99f41d8..a691352 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc
+++ b/darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
@@ -198,6 +198,14 @@
 #undef VALUE_PROF_KIND
 /* VALUE_PROF_KIND end */
 
+#undef COVMAP_V2_OR_V3
+#ifdef COVMAP_V2
+#define COVMAP_V2_OR_V3
+#endif
+#ifdef COVMAP_V3
+#define COVMAP_V2_OR_V3
+#endif
+
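+/* For example, a consumer can define COVMAP_V2 or COVMAP_V3 before including
+ * this file; either definition enables the sections guarded by
+ * COVMAP_V2_OR_V3 below. */
+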
 /* COVMAP_FUNC_RECORD start */
 /* Definition of member fields of the function record structure in coverage
  * map.
@@ -214,16 +222,30 @@
 COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
                    llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
                    NameValue.size()))
-#else
+#endif
+#ifdef COVMAP_V2_OR_V3
 COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                   llvm::IndexedInstrProf::ComputeHash(NameValue)))
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt64Ty(Ctx), NameHash))
 #endif
 COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
-                   CoverageMapping.size()))
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt32Ty(Ctx), CoverageMapping.size()))
 COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt64Ty(Ctx), FuncHash))
+#ifdef COVMAP_V3
+COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt64Ty(Ctx), FilenamesRef))
+COVMAP_FUNC_RECORD(const char, \
+                   llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \
+                                        CoverageMapping.size()), \
+                   CoverageMapping, \
+                   llvm::ConstantDataArray::getRaw( \
+                     CoverageMapping, CoverageMapping.size(), \
+                     llvm::Type::getInt8Ty(Ctx)))
+#endif
 #undef COVMAP_FUNC_RECORD
 /* COVMAP_FUNC_RECORD end.  */
 
@@ -236,7 +258,7 @@
 #define INSTR_PROF_DATA_DEFINED
 #endif
 COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
-              llvm::ConstantInt::get(Int32Ty,  FunctionRecords.size()))
+              llvm::ConstantInt::get(Int32Ty, NRecords))
 COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
               llvm::ConstantInt::get(Int32Ty, FilenamesSize))
 COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
@@ -267,6 +289,9 @@
 INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
                       INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
                       INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
+INSTR_PROF_SECT_ENTRY(IPSK_covfun, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \
+                      INSTR_PROF_COVFUN_COFF, "__LLVM_COV,")
 INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
                       INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
                       INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
@@ -632,9 +657,9 @@
 /* Raw profile format version (start from 1). */
 #define INSTR_PROF_RAW_VERSION 5
 /* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 5
-/* Coverage mapping format vresion (start from 0). */
-#define INSTR_PROF_COVMAP_VERSION 2
+#define INSTR_PROF_INDEX_VERSION 6
+/* Coverage mapping format version (start from 0). */
+#define INSTR_PROF_COVMAP_VERSION 3
 
 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
  * version for other variants of profile. We set the lowest bit of the upper 8
@@ -661,6 +686,7 @@
 #define INSTR_PROF_VALS_COMMON __llvm_prf_vals
 #define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
 #define INSTR_PROF_COVMAP_COMMON __llvm_covmap
+#define INSTR_PROF_COVFUN_COMMON __llvm_covfun
 #define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
 /* Windows section names. Because these section names contain dollar characters,
  * they must be quoted.
@@ -671,6 +697,7 @@
 #define INSTR_PROF_VALS_COFF ".lprfv$M"
 #define INSTR_PROF_VNODES_COFF ".lprfnd$M"
 #define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
+#define INSTR_PROF_COVFUN_COFF ".lcovfun$M"
 #define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
 
 #ifdef _WIN32
@@ -685,6 +712,7 @@
 /* Value profile nodes section. */
 #define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
 #define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
+#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF
 #define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
 #else
 /* Runtime section names and name strings.  */
@@ -698,6 +726,7 @@
 /* Value profile nodes section. */
 #define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
 #define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
+#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON)
 /* Order file instrumentation. */
 #define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
   INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
@@ -752,3 +781,5 @@
 #else
 #undef INSTR_PROF_DATA_DEFINED
 #endif
+
+#undef COVMAP_V2_OR_V3
diff --git a/darwin-x86/lib64/clang/11.0.1/include/ptwriteintrin.h b/darwin-x86/lib64/clang/11.0.5/include/ptwriteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/ptwriteintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/ptwriteintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/rdseedintrin.h b/darwin-x86/lib64/clang/11.0.5/include/rdseedintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/rdseedintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/rdseedintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/rtmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/rtmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/rtmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/rtmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/s390intrin.h b/darwin-x86/lib64/clang/11.0.5/include/s390intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/s390intrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/s390intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/allocator_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/allocator_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/asan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/asan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/common_interface_defs.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/common_interface_defs.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/coverage_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/coverage_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/dfsan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/dfsan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/hwasan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/hwasan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/linux_syscall_hooks.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/linux_syscall_hooks.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/lsan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/lsan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/msan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/msan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/netbsd_syscall_hooks.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/netbsd_syscall_hooks.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/scudo_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/scudo_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
similarity index 91%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
index 011b233..96b8ad5 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
@@ -38,34 +38,34 @@
 
 // Mutex has static storage duration and no-op constructor and destructor.
 // This effectively makes tsan ignore destroy annotation.
-const unsigned __tsan_mutex_linker_init      = 1 << 0;
+static const unsigned __tsan_mutex_linker_init      = 1 << 0;
 // Mutex is write reentrant.
-const unsigned __tsan_mutex_write_reentrant  = 1 << 1;
+static const unsigned __tsan_mutex_write_reentrant  = 1 << 1;
 // Mutex is read reentrant.
-const unsigned __tsan_mutex_read_reentrant   = 1 << 2;
+static const unsigned __tsan_mutex_read_reentrant   = 1 << 2;
 // Mutex does not have static storage duration, and must not be used after
 // its destructor runs.  The opposite of __tsan_mutex_linker_init.
 // If this flag is passed to __tsan_mutex_destroy, then the destruction
 // is ignored unless this flag was previously set on the mutex.
-const unsigned __tsan_mutex_not_static       = 1 << 8;
+static const unsigned __tsan_mutex_not_static       = 1 << 8;
 
 // Mutex operation flags:
 
 // Denotes read lock operation.
-const unsigned __tsan_mutex_read_lock        = 1 << 3;
+static const unsigned __tsan_mutex_read_lock        = 1 << 3;
 // Denotes try lock operation.
-const unsigned __tsan_mutex_try_lock         = 1 << 4;
+static const unsigned __tsan_mutex_try_lock         = 1 << 4;
 // Denotes that a try lock operation has failed to acquire the mutex.
-const unsigned __tsan_mutex_try_lock_failed  = 1 << 5;
+static const unsigned __tsan_mutex_try_lock_failed  = 1 << 5;
 // Denotes that the lock operation acquires multiple recursion levels.
 // Number of levels is passed in recursion parameter.
 // This is useful for annotation of e.g. Java builtin monitors,
 // for which wait operation releases all recursive acquisitions of the mutex.
-const unsigned __tsan_mutex_recursive_lock   = 1 << 6;
+static const unsigned __tsan_mutex_recursive_lock   = 1 << 6;
 // Denotes that the unlock operation releases all recursion levels.
 // Number of released levels is returned and later must be passed to
 // the corresponding __tsan_mutex_post_lock annotation.
-const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
+static const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
 
 // Annotate creation of a mutex.
 // Supported flags: mutex creation flags.
@@ -152,7 +152,7 @@
 
 // Flags for __tsan_switch_to_fiber:
 // Do not establish a happens-before relation between fibers
-const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
+static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface_atomic.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface_atomic.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sanitizer/ubsan_interface.h b/darwin-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sanitizer/ubsan_interface.h
rename to darwin-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/serializeintrin.h b/darwin-x86/lib64/clang/11.0.5/include/serializeintrin.h
new file mode 100644
index 0000000..b774e5a
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/serializeintrin.h
@@ -0,0 +1,30 @@
+/*===--------------- serializeintrin.h - serialize intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <serializeintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SERIALIZEINTRIN_H
+#define __SERIALIZEINTRIN_H
+
+/// Serialize instruction fetch and execution.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> SERIALIZE </c> instruction.
+///
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("serialize")))
+_serialize (void)
+{
+  __builtin_ia32_serialize ();
+}
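+
+// Editor's illustrative sketch, not part of the upstream header: a typical
+// use is fencing instruction fetch after code modification. patch_code() is
+// a hypothetical helper; building requires a SERIALIZE-capable target
+// (e.g. -mserialize).
+//
+//   #include <immintrin.h>
+//
+//   void apply_patch(void) {
+//     patch_code();  // hypothetical: rewrites instruction bytes in place
+//     _serialize();  // later instruction fetches observe the new bytes
+//   }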
+
+#endif /* __SERIALIZEINTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/sgxintrin.h b/darwin-x86/lib64/clang/11.0.5/include/sgxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/sgxintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/sgxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/shaintrin.h b/darwin-x86/lib64/clang/11.0.5/include/shaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/shaintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/shaintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/smmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/smmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/smmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stdalign.h b/darwin-x86/lib64/clang/11.0.5/include/stdalign.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stdalign.h
rename to darwin-x86/lib64/clang/11.0.5/include/stdalign.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stdarg.h b/darwin-x86/lib64/clang/11.0.5/include/stdarg.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stdarg.h
rename to darwin-x86/lib64/clang/11.0.5/include/stdarg.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stdatomic.h b/darwin-x86/lib64/clang/11.0.5/include/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stdatomic.h
rename to darwin-x86/lib64/clang/11.0.5/include/stdatomic.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stdbool.h b/darwin-x86/lib64/clang/11.0.5/include/stdbool.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stdbool.h
rename to darwin-x86/lib64/clang/11.0.5/include/stdbool.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stddef.h b/darwin-x86/lib64/clang/11.0.5/include/stddef.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stddef.h
rename to darwin-x86/lib64/clang/11.0.5/include/stddef.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stdint.h b/darwin-x86/lib64/clang/11.0.5/include/stdint.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stdint.h
rename to darwin-x86/lib64/clang/11.0.5/include/stdint.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/stdnoreturn.h b/darwin-x86/lib64/clang/11.0.5/include/stdnoreturn.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/stdnoreturn.h
rename to darwin-x86/lib64/clang/11.0.5/include/stdnoreturn.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/tbmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/tbmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/tbmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/tbmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/tgmath.h b/darwin-x86/lib64/clang/11.0.5/include/tgmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/tgmath.h
rename to darwin-x86/lib64/clang/11.0.5/include/tgmath.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/tmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/tmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h b/darwin-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h
new file mode 100644
index 0000000..491823e
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h
@@ -0,0 +1,56 @@
+/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <tsxldtrkintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __TSXLDTRKINTRIN_H
+#define __TSXLDTRKINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("tsxldtrk")))
+
+/// Marks the start of a TSX (RTM) suspend load address tracking region. If
+///    this intrinsic is used inside a transactional region, subsequent loads
+///    are not added to the read set of the transaction. If it's used inside a
+///    suspend load address tracking region, it causes a transaction abort.
+///    If it's used outside of a transactional region, it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XSUSLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xsusldtrk (void)
+{
+    __builtin_ia32_xsusldtrk();
+}
+
+/// Marks the end of a TSX (RTM) suspend load address tracking region. If this
+///    intrinsic is used inside a suspend load address tracking region, it
+///    ends the suspend region and all following load addresses are added to
+///    the transaction read set. If it's used inside an active transaction but
+///    not in a suspend region, it causes a transaction abort. If it's used
+///    outside of a transactional region, it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XRESLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xresldtrk (void)
+{
+    __builtin_ia32_xresldtrk();
+}
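+
+// Editor's illustrative sketch, not part of the upstream header: pairing the
+// two intrinsics inside an RTM transaction so one load stays out of the read
+// set. Assumes rtmintrin.h's _xbegin/_xend/_XBEGIN_STARTED and a target
+// built with -mrtm -mtsxldtrk; shared_ptr is hypothetical.
+//
+//   if (_xbegin() == _XBEGIN_STARTED) {
+//     _xsusldtrk();               // suspend load address tracking
+//     int probe = *shared_ptr;    // this load is not tracked
+//     _xresldtrk();               // resume tracking
+//     _xend();
+//   }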
+
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __TSXLDTRKINTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/unwind.h b/darwin-x86/lib64/clang/11.0.5/include/unwind.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/unwind.h
rename to darwin-x86/lib64/clang/11.0.5/include/unwind.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/vadefs.h b/darwin-x86/lib64/clang/11.0.5/include/vadefs.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/vadefs.h
rename to darwin-x86/lib64/clang/11.0.5/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/vaesintrin.h b/darwin-x86/lib64/clang/11.0.5/include/vaesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/vaesintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/vaesintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/varargs.h b/darwin-x86/lib64/clang/11.0.5/include/varargs.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/varargs.h
rename to darwin-x86/lib64/clang/11.0.5/include/varargs.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/vecintrin.h b/darwin-x86/lib64/clang/11.0.5/include/vecintrin.h
new file mode 100644
index 0000000..e58c976
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/vecintrin.h
@@ -0,0 +1,10998 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__s390x__) && defined(__VEC__)
+
+#define __ATTRS_ai __attribute__((__always_inline__))
+#define __ATTRS_o __attribute__((__overloadable__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+#define __constant(PARM) \
+  __attribute__((__enable_if__ ((PARM) == (PARM), \
+     "argument must be a constant integer")))
+#define __constant_range(PARM, LOW, HIGH) \
+  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
+     "argument must be a constant integer from " #LOW " to " #HIGH)))
+#define __constant_pow2_range(PARM, LOW, HIGH) \
+  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
+                                ((PARM) & ((PARM) - 1)) == 0, \
+     "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
+
+/*-- __lcbb -----------------------------------------------------------------*/
+
+extern __ATTRS_o unsigned int
+__lcbb(const void *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
+  __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
+                           ((Y) == 64 ? 0 : \
+                            (Y) == 128 ? 1 : \
+                            (Y) == 256 ? 2 : \
+                            (Y) == 512 ? 3 : \
+                            (Y) == 1024 ? 4 : \
+                            (Y) == 2048 ? 5 : \
+                            (Y) == 4096 ? 6 : 0) : 0))
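+
+// Editor's illustrative sketch, not part of the upstream header: __lcbb
+// ("load count to block boundary") returns how many bytes can be loaded at
+// __ptr without crossing the given power-of-2 boundary, capped at 16.
+//
+//   const char *p = buf + 4090;        // buf assumed 4096-byte aligned
+//   unsigned int n = __lcbb(p, 4096);  // 6: only bytes 4090..4095 remain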
+
+/*-- vec_extract ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai signed char
+vec_extract(__vector signed char __vec, int __index) {
+  return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(__vector __bool char __vec, int __index) {
+  return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(__vector unsigned char __vec, int __index) {
+  return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai signed short
+vec_extract(__vector signed short __vec, int __index) {
+  return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(__vector __bool short __vec, int __index) {
+  return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(__vector unsigned short __vec, int __index) {
+  return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai signed int
+vec_extract(__vector signed int __vec, int __index) {
+  return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(__vector __bool int __vec, int __index) {
+  return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(__vector unsigned int __vec, int __index) {
+  return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai signed long long
+vec_extract(__vector signed long long __vec, int __index) {
+  return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(__vector __bool long long __vec, int __index) {
+  return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(__vector unsigned long long __vec, int __index) {
+  return __vec[__index & 1];
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai float
+vec_extract(__vector float __vec, int __index) {
+  return __vec[__index & 3];
+}
+#endif
+
+static inline __ATTRS_o_ai double
+vec_extract(__vector double __vec, int __index) {
+  return __vec[__index & 1];
+}
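+
+// Editor's note (not part of the upstream header): vec_extract masks the
+// index with (elements - 1), so out-of-range indices wrap around rather
+// than trap. A minimal sketch:
+//
+//   __vector signed int v = (__vector signed int){10, 20, 30, 40};
+//   signed int x = vec_extract(v, 5);   // 5 & 3 == 1, so x == 20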
+
+/*-- vec_insert -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_insert(signed char __scalar, __vector signed char __vec, int __index) {
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector __bool char __vec, int __index) {
+  __vector unsigned char __newvec = (__vector unsigned char)__vec;
+  __newvec[__index & 15] = (unsigned char)__scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector unsigned char __vec, int __index) {
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_insert(signed short __scalar, __vector signed short __vec, int __index) {
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector __bool short __vec,
+           int __index) {
+  __vector unsigned short __newvec = (__vector unsigned short)__vec;
+  __newvec[__index & 7] = (unsigned short)__scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector unsigned short __vec,
+           int __index) {
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_insert(signed int __scalar, __vector signed int __vec, int __index) {
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector __bool int __vec, int __index) {
+  __vector unsigned int __newvec = (__vector unsigned int)__vec;
+  __newvec[__index & 3] = __scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector unsigned int __vec, int __index) {
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_insert(signed long long __scalar, __vector signed long long __vec,
+           int __index) {
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector __bool long long __vec,
+           int __index) {
+  __vector unsigned long long __newvec = (__vector unsigned long long)__vec;
+  __newvec[__index & 1] = __scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector unsigned long long __vec,
+           int __index) {
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_insert(float __scalar, __vector float __vec, int __index) {
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_insert(double __scalar, __vector double __vec, int __index) {
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
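+
+// Editor's note (not part of the upstream header): vec_insert returns a
+// copy with one element replaced; the input vector itself is not modified.
+// A minimal sketch:
+//
+//   __vector signed int v = (__vector signed int){0, 0, 0, 0};
+//   v = vec_insert(7, v, 2);            // v is now {0, 0, 7, 0}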
+
+/*-- vec_promote ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_promote(signed char __scalar, int __index) {
+  const __vector signed char __zero = (__vector signed char)0;
+  __vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_promote(unsigned char __scalar, int __index) {
+  const __vector unsigned char __zero = (__vector unsigned char)0;
+  __vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_promote(signed short __scalar, int __index) {
+  const __vector signed short __zero = (__vector signed short)0;
+  __vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+                                -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_promote(unsigned short __scalar, int __index) {
+  const __vector unsigned short __zero = (__vector unsigned short)0;
+  __vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+                                  -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_promote(signed int __scalar, int __index) {
+  const __vector signed int __zero = (__vector signed int)0;
+  __vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+                                                      -1, -1, -1, -1);
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_promote(unsigned int __scalar, int __index) {
+  const __vector unsigned int __zero = (__vector unsigned int)0;
+  __vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+                                                        -1, -1, -1, -1);
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_promote(signed long long __scalar, int __index) {
+  const __vector signed long long __zero = (__vector signed long long)0;
+  __vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+                                                            -1, -1);
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_promote(unsigned long long __scalar, int __index) {
+  const __vector unsigned long long __zero = (__vector unsigned long long)0;
+  __vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+                                                              -1, -1);
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_promote(float __scalar, int __index) {
+  const __vector float __zero = (__vector float)0.0f;
+  __vector float __vec = __builtin_shufflevector(__zero, __zero,
+                                                 -1, -1, -1, -1);
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_promote(double __scalar, int __index) {
+  const __vector double __zero = (__vector double)0.0;
+  __vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
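+
+// Editor's note (not part of the upstream header): vec_promote places the
+// scalar in one lane and leaves the remaining lanes undefined (that is what
+// the -1 shuffle indices above express), so don't rely on them being zero.
+//
+//   __vector double d = vec_promote(1.5, 0);  // d[0] == 1.5, d[1] undefined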
+
+/*-- vec_insert_and_zero ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_insert_and_zero(const signed char *__ptr) {
+  __vector signed char __vec = (__vector signed char)0;
+  __vec[7] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert_and_zero(const unsigned char *__ptr) {
+  __vector unsigned char __vec = (__vector unsigned char)0;
+  __vec[7] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_insert_and_zero(const signed short *__ptr) {
+  __vector signed short __vec = (__vector signed short)0;
+  __vec[3] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert_and_zero(const unsigned short *__ptr) {
+  __vector unsigned short __vec = (__vector unsigned short)0;
+  __vec[3] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_insert_and_zero(const signed int *__ptr) {
+  __vector signed int __vec = (__vector signed int)0;
+  __vec[1] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert_and_zero(const unsigned int *__ptr) {
+  __vector unsigned int __vec = (__vector unsigned int)0;
+  __vec[1] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_insert_and_zero(const signed long long *__ptr) {
+  __vector signed long long __vec = (__vector signed long long)0;
+  __vec[0] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert_and_zero(const unsigned long long *__ptr) {
+  __vector unsigned long long __vec = (__vector unsigned long long)0;
+  __vec[0] = *__ptr;
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_insert_and_zero(const float *__ptr) {
+  __vector float __vec = (__vector float)0.0f;
+  __vec[1] = *__ptr;
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_insert_and_zero(const double *__ptr) {
+  __vector double __vec = (__vector double)0.0;
+  __vec[0] = *__ptr;
+  return __vec;
+}
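+
+// Editor's note (not part of the upstream header): unlike vec_promote,
+// vec_insert_and_zero clears every other lane; the target lane is fixed per
+// element type (e.g. lane 0 for double, lane 1 for int, as above). Sketch:
+//
+//   double x = 2.0;
+//   __vector double d = vec_insert_and_zero(&x);  // {2.0, 0.0}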
+
+/*-- vec_perm ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_perm(__vector signed char __a, __vector signed char __b,
+         __vector unsigned char __c) {
+  return (__vector signed char)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_perm(__vector unsigned char __a, __vector unsigned char __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned char)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_perm(__vector __bool char __a, __vector __bool char __b,
+         __vector unsigned char __c) {
+  return (__vector __bool char)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_perm(__vector signed short __a, __vector signed short __b,
+         __vector unsigned char __c) {
+  return (__vector signed short)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_perm(__vector unsigned short __a, __vector unsigned short __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned short)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_perm(__vector __bool short __a, __vector __bool short __b,
+         __vector unsigned char __c) {
+  return (__vector __bool short)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_perm(__vector signed int __a, __vector signed int __b,
+         __vector unsigned char __c) {
+  return (__vector signed int)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_perm(__vector unsigned int __a, __vector unsigned int __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned int)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_perm(__vector __bool int __a, __vector __bool int __b,
+         __vector unsigned char __c) {
+  return (__vector __bool int)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_perm(__vector signed long long __a, __vector signed long long __b,
+         __vector unsigned char __c) {
+  return (__vector signed long long)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_perm(__vector unsigned long long __a, __vector unsigned long long __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned long long)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_perm(__vector __bool long long __a, __vector __bool long long __b,
+         __vector unsigned char __c) {
+  return (__vector __bool long long)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_perm(__vector float __a, __vector float __b,
+         __vector unsigned char __c) {
+  return (__vector float)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_perm(__vector double __a, __vector double __b,
+         __vector unsigned char __c) {
+  return (__vector double)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
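+
+// Editor's note (not part of the upstream header): each byte of __c selects
+// one of the 32 concatenated source bytes (0..15 from __a, 16..31 from __b).
+// A sketch that interleaves the low halves of two byte vectors a and b:
+//
+//   __vector unsigned char pat = (__vector unsigned char)
+//     {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23};
+//   __vector unsigned char r = vec_perm(a, b, pat);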
+
+/*-- vec_permi --------------------------------------------------------------*/
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector signed long long
+vec_permi(__vector signed long long __a, __vector signed long long __b,
+          int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector unsigned long long
+vec_permi(__vector unsigned long long __a, __vector unsigned long long __b,
+          int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector __bool long long
+vec_permi(__vector __bool long long __a, __vector __bool long long __b,
+          int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector double
+vec_permi(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 3);
+
+#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
+  __builtin_s390_vpdi((__vector unsigned long long)(X), \
+                      (__vector unsigned long long)(Y), \
+                      (((Z) & 2) << 1) | ((Z) & 1)))
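+
+// Editor's note (not part of the upstream header): vec_permi selects one
+// doubleword from each operand; bit 1 of the constant picks the element of
+// (X) and bit 0 picks the element of (Y). Sketch:
+//
+//   vec_permi(a, b, 0);   // {a[0], b[0]}
+//   vec_permi(a, b, 3);   // {a[1], b[1]}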
+
+/*-- vec_bperm_u128 ---------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector unsigned long long
+vec_bperm_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vbperm(__a, __b);
+}
+#endif
+
+/*-- vec_revb ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_revb(__vector signed short __vec) {
+  return (__vector signed short)
+         __builtin_s390_vlbrh((__vector unsigned short)__vec);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_revb(__vector unsigned short __vec) {
+  return __builtin_s390_vlbrh(__vec);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_revb(__vector signed int __vec) {
+  return (__vector signed int)
+         __builtin_s390_vlbrf((__vector unsigned int)__vec);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_revb(__vector unsigned int __vec) {
+  return __builtin_s390_vlbrf(__vec);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_revb(__vector signed long long __vec) {
+  return (__vector signed long long)
+         __builtin_s390_vlbrg((__vector unsigned long long)__vec);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_revb(__vector unsigned long long __vec) {
+  return __builtin_s390_vlbrg(__vec);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_revb(__vector float __vec) {
+  return (__vector float)
+         __builtin_s390_vlbrf((__vector unsigned int)__vec);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_revb(__vector double __vec) {
+  return (__vector double)
+         __builtin_s390_vlbrg((__vector unsigned long long)__vec);
+}
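+
+// Editor's note (not part of the upstream header): vec_revb reverses the
+// bytes inside each element (an endianness swap), while vec_reve below
+// reverses the order of the elements themselves. Sketch:
+//
+//   __vector unsigned short h = (__vector unsigned short)
+//     {0x0102, 0x0304, 0x0506, 0x0708, 0x090a, 0x0b0c, 0x0d0e, 0x0f10};
+//   h = vec_revb(h);    // first element becomes 0x0201, and so on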
+
+/*-- vec_reve ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_reve(__vector signed char __vec) {
+  return (__vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
+                                  __vec[11], __vec[10], __vec[9], __vec[8],
+                                  __vec[7], __vec[6], __vec[5], __vec[4],
+                                  __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_reve(__vector unsigned char __vec) {
+  return (__vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+                                    __vec[11], __vec[10], __vec[9], __vec[8],
+                                    __vec[7], __vec[6], __vec[5], __vec[4],
+                                    __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_reve(__vector __bool char __vec) {
+  return (__vector __bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
+                                  __vec[11], __vec[10], __vec[9], __vec[8],
+                                  __vec[7], __vec[6], __vec[5], __vec[4],
+                                  __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_reve(__vector signed short __vec) {
+  return (__vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
+                                   __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_reve(__vector unsigned short __vec) {
+  return (__vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+                                     __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_reve(__vector __bool short __vec) {
+  return (__vector __bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
+                                   __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_reve(__vector signed int __vec) {
+  return (__vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_reve(__vector unsigned int __vec) {
+  return (__vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_reve(__vector __bool int __vec) {
+  return (__vector __bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_reve(__vector signed long long __vec) {
+  return (__vector signed long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_reve(__vector unsigned long long __vec) {
+  return (__vector unsigned long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_reve(__vector __bool long long __vec) {
+  return (__vector __bool long long) { __vec[1], __vec[0] };
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_reve(__vector float __vec) {
+  return (__vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_reve(__vector double __vec) {
+  return (__vector double) { __vec[1], __vec[0] };
+}
+
+/*-- vec_sel ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+        __vector unsigned char __c) {
+  return (((__vector signed char)__c & __b) |
+          (~(__vector signed char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+        __vector __bool char __c) {
+  return (((__vector signed char)__c & __b) |
+          (~(__vector signed char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+        __vector unsigned char __c) {
+  return (((__vector __bool char)__c & __b) |
+          (~(__vector __bool char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+        __vector __bool char __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+        __vector unsigned char __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+        __vector __bool char __c) {
+  return (((__vector unsigned char)__c & __b) |
+          (~(__vector unsigned char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+        __vector unsigned short __c) {
+  return (((__vector signed short)__c & __b) |
+          (~(__vector signed short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+        __vector __bool short __c) {
+  return (((__vector signed short)__c & __b) |
+          (~(__vector signed short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+        __vector unsigned short __c) {
+  return (((__vector __bool short)__c & __b) |
+          (~(__vector __bool short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+        __vector __bool short __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+        __vector unsigned short __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+        __vector __bool short __c) {
+  return (((__vector unsigned short)__c & __b) |
+          (~(__vector unsigned short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+        __vector unsigned int __c) {
+  return (((__vector signed int)__c & __b) |
+          (~(__vector signed int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+        __vector __bool int __c) {
+  return (((__vector signed int)__c & __b) |
+          (~(__vector signed int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+        __vector unsigned int __c) {
+  return (((__vector __bool int)__c & __b) |
+          (~(__vector __bool int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+        __vector __bool int __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+        __vector unsigned int __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+        __vector __bool int __c) {
+  return (((__vector unsigned int)__c & __b) |
+          (~(__vector unsigned int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+        __vector unsigned long long __c) {
+  return (((__vector signed long long)__c & __b) |
+          (~(__vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+        __vector __bool long long __c) {
+  return (((__vector signed long long)__c & __b) |
+          (~(__vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+        __vector unsigned long long __c) {
+  return (((__vector __bool long long)__c & __b) |
+          (~(__vector __bool long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+        __vector __bool long long __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+        __vector unsigned long long __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+        __vector __bool long long __c) {
+  return (((__vector unsigned long long)__c & __b) |
+          (~(__vector unsigned long long)__c & __a));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector unsigned int __c) {
+  return (__vector float)((__c & (__vector unsigned int)__b) |
+                          (~__c & (__vector unsigned int)__a));
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector __bool int __c) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  __vector unsigned int __cc = (__vector unsigned int)__c;
+  return (__vector float)((__cc & __bc) | (~__cc & __ac));
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+        __vector unsigned long long __c) {
+  return (__vector double)((__c & (__vector unsigned long long)__b) |
+                           (~__c & (__vector unsigned long long)__a));
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+        __vector __bool long long __c) {
+  __vector unsigned long long __ac = (__vector unsigned long long)__a;
+  __vector unsigned long long __bc = (__vector unsigned long long)__b;
+  __vector unsigned long long __cc = (__vector unsigned long long)__c;
+  return (__vector double)((__cc & __bc) | (~__cc & __ac));
+}
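+
+// Editor's note (not part of the upstream header): vec_sel is a bitwise
+// select, (mask & b) | (~mask & a); with an all-ones/all-zeros lane mask
+// such as a compare result it acts as a per-lane conditional. Sketch,
+// assuming the header's vec_cmpgt comparison:
+//
+//   __vector __bool int m = vec_cmpgt(x, y);
+//   __vector signed int r = vec_sel(y, x, m);  // per-lane max(x, y)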
+
+/*-- vec_gather_element -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed int
+vec_gather_element(__vector signed int __vec,
+                   __vector unsigned int __offset,
+                   const signed int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const signed int *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_gather_element(__vector __bool int __vec,
+                   __vector unsigned int __offset,
+                   const unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const unsigned int *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gather_element(__vector unsigned int __vec,
+                   __vector unsigned int __offset,
+                   const unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const unsigned int *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_gather_element(__vector signed long long __vec,
+                   __vector unsigned long long __offset,
+                   const signed long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const signed long long *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_gather_element(__vector __bool long long __vec,
+                   __vector unsigned long long __offset,
+                   const unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const unsigned long long *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gather_element(__vector unsigned long long __vec,
+                   __vector unsigned long long __offset,
+                   const unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const unsigned long long *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_gather_element(__vector float __vec,
+                   __vector unsigned int __offset,
+                   const float *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const float *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_gather_element(__vector double __vec,
+                   __vector unsigned long long __offset,
+                   const double *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const double *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
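+
+// Editor's note (not part of the upstream header): the byte offset comes
+// from the lane of __offset selected by the constant __index, and only that
+// one lane of __vec is replaced. Sketch (v and base are hypothetical):
+//
+//   __vector unsigned int offs = (__vector unsigned int){0, 8, 16, 24};
+//   v = vec_gather_element(v, offs, base, 1);  // v[1] = *(base + 8 bytes)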
+
+/*-- vec_scatter_element ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector signed int __vec,
+                    __vector unsigned int __offset,
+                    signed int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(signed int *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector __bool int __vec,
+                    __vector unsigned int __offset,
+                    unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(unsigned int *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector unsigned int __vec,
+                    __vector unsigned int __offset,
+                    unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(unsigned int *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector signed long long __vec,
+                    __vector unsigned long long __offset,
+                    signed long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(signed long long *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector __bool long long __vec,
+                    __vector unsigned long long __offset,
+                    unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(unsigned long long *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector unsigned long long __vec,
+                    __vector unsigned long long __offset,
+                    unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(unsigned long long *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector float __vec,
+                    __vector unsigned int __offset,
+                    float *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(float *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+#endif
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector double __vec,
+                    __vector unsigned long long __offset,
+                    double *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(double *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
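+
+// Editor's note (not part of the upstream header): the store-side mirror of
+// vec_gather_element; one lane is written at base + byte offset. Sketch
+// (v, offs, and base as in the gather sketch above):
+//
+//   vec_scatter_element(v, offs, base, 1);   // *(base + 8 bytes) = v[1]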
+
+/*-- vec_xl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_xl(long __offset, const signed char *__ptr) {
+  return *(const __vector signed char *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_xl(long __offset, const unsigned char *__ptr) {
+  return *(const __vector unsigned char *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_xl(long __offset, const signed short *__ptr) {
+  return *(const __vector signed short *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_xl(long __offset, const unsigned short *__ptr) {
+  return *(const __vector unsigned short *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_xl(long __offset, const signed int *__ptr) {
+  return *(const __vector signed int *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_xl(long __offset, const unsigned int *__ptr) {
+  return *(const __vector unsigned int *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_xl(long __offset, const signed long long *__ptr) {
+  return *(const __vector signed long long *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_xl(long __offset, const unsigned long long *__ptr) {
+  return *(const __vector unsigned long long *)
+          ((const char *)__ptr + __offset);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_xl(long __offset, const float *__ptr) {
+  return *(const __vector float *)
+          ((const char *)__ptr + __offset);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_xl(long __offset, const double *__ptr) {
+  return *(const __vector double *)
+          ((const char *)__ptr + __offset);
+}
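+
+// Editor's note (not part of the upstream header): vec_xl is an unaligned
+// 16-byte load from a byte offset off a typed pointer; the deprecated
+// vec_xld2/vec_xlw4 variants below behave the same way. Sketch:
+//
+//   unsigned char buf[32];
+//   __vector unsigned char v = vec_xl(4L, buf);  // loads buf[4..19]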
+
+/*-- vec_xld2 ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_xld2(long __offset, const signed char *__ptr) {
+  return *(const __vector signed char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_xld2(long __offset, const unsigned char *__ptr) {
+  return *(const __vector unsigned char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_xld2(long __offset, const signed short *__ptr) {
+  return *(const __vector signed short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_xld2(long __offset, const unsigned short *__ptr) {
+  return *(const __vector unsigned short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_xld2(long __offset, const signed int *__ptr) {
+  return *(const __vector signed int *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_xld2(long __offset, const unsigned int *__ptr) {
+  return *(const __vector unsigned int *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_xld2(long __offset, const signed long long *__ptr) {
+  return *(const __vector signed long long *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_xld2(long __offset, const unsigned long long *__ptr) {
+  return *(const __vector unsigned long long *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_xld2(long __offset, const double *__ptr) {
+  return *(const __vector double *)
+          ((const char *)__ptr + __offset);
+}
+
+/*-- vec_xlw4 ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_xlw4(long __offset, const signed char *__ptr) {
+  return *(const __vector signed char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_xlw4(long __offset, const unsigned char *__ptr) {
+  return *(const __vector unsigned char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_xlw4(long __offset, const signed short *__ptr) {
+  return *(const __vector signed short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_xlw4(long __offset, const unsigned short *__ptr) {
+  return *(const __vector unsigned short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_xlw4(long __offset, const signed int *__ptr) {
+  return *(const __vector signed int *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_xlw4(long __offset, const unsigned int *__ptr) {
+  return *(const __vector unsigned int *)
+          ((const char *)__ptr + __offset);
+}
+
+/*-- vec_xst ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed char __vec, long __offset, signed char *__ptr) {
+  *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+  *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed short __vec, long __offset, signed short *__ptr) {
+  *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+  *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed int __vec, long __offset, signed int *__ptr) {
+  *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+  *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed long long __vec, long __offset,
+          signed long long *__ptr) {
+  *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned long long __vec, long __offset,
+          unsigned long long *__ptr) {
+  *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai void
+vec_xst(__vector float __vec, long __offset, float *__ptr) {
+  *(__vector float *)((char *)__ptr + __offset) = __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector double __vec, long __offset, double *__ptr) {
+  *(__vector double *)((char *)__ptr + __offset) = __vec;
+}
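+
+// Editor's note (not part of the upstream header): vec_xst is the matching
+// unaligned 16-byte store; the deprecated vec_xstd2/vec_xstw4 variants
+// below are equivalent. Sketch (buf as in the vec_xl sketch above):
+//
+//   vec_xst(v, 4L, buf);   // stores the 16 bytes of v at buf[4..19]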
+
+/*-- vec_xstd2 --------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed char __vec, long __offset, signed char *__ptr) {
+  *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+  *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed short __vec, long __offset, signed short *__ptr) {
+  *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+  *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed int __vec, long __offset, signed int *__ptr) {
+  *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+  *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed long long __vec, long __offset,
+          signed long long *__ptr) {
+  *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned long long __vec, long __offset,
+          unsigned long long *__ptr) {
+  *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector double __vec, long __offset, double *__ptr) {
+  *(__vector double *)((char *)__ptr + __offset) = __vec;
+}
+
+/*-- vec_xstw4 --------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector signed char __vec, long __offset, signed char *__ptr) {
+  *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+  *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector signed short __vec, long __offset, signed short *__ptr) {
+  *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+  *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector signed int __vec, long __offset, signed int *__ptr) {
+  *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+  *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+}
+
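+/* Note that despite their element-size suffixes, the deprecated
+ * vec_xld2/vec_xlw4 loads and vec_xstd2/vec_xstw4 stores above perform
+ * the same full 16-byte unaligned access as the non-suffixed vec_xl and
+ * vec_xst; only the names differ. */
+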
+/*-- vec_load_bndry ---------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_load_bndry(const signed char *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned char
+vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector signed short
+vec_load_bndry(const signed short *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned short
+vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector signed int
+vec_load_bndry(const signed int *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned int
+vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector signed long long
+vec_load_bndry(const signed long long *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned long long
+vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+#if __ARCH__ >= 12
+extern __ATTRS_o __vector float
+vec_load_bndry(const float *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+#endif
+
+extern __ATTRS_o __vector double
+vec_load_bndry(const double *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
+  __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
+                            (Y) == 128 ? 1 : \
+                            (Y) == 256 ? 2 : \
+                            (Y) == 512 ? 3 : \
+                            (Y) == 1024 ? 4 : \
+                            (Y) == 2048 ? 5 : \
+                            (Y) == 4096 ? 6 : -1)))
+
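+/* The macro above maps the compile-time boundary __len onto the
+ * immediate operand of the VLBB instruction (64 -> 0, 128 -> 1, ...,
+ * 4096 -> 6), which is why __len must be a power of two between 64 and
+ * 4096. A sketch, with p an assumed pointer:
+ *
+ *   __vector unsigned char head = vec_load_bndry(p, 64);
+ *
+ * Only the bytes of the 16-byte vector that lie before the next 64-byte
+ * boundary after p are loaded; bytes at or past the boundary are left
+ * unspecified. This makes it safe to begin a vector load near the end
+ * of a page.
+ */
+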
+/*-- vec_load_len -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_load_len(const signed char *__ptr, unsigned int __len) {
+  return (__vector signed char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_load_len(const unsigned char *__ptr, unsigned int __len) {
+  return (__vector unsigned char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_load_len(const signed short *__ptr, unsigned int __len) {
+  return (__vector signed short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_load_len(const unsigned short *__ptr, unsigned int __len) {
+  return (__vector unsigned short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_load_len(const signed int *__ptr, unsigned int __len) {
+  return (__vector signed int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_load_len(const unsigned int *__ptr, unsigned int __len) {
+  return (__vector unsigned int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_load_len(const signed long long *__ptr, unsigned int __len) {
+  return (__vector signed long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
+  return (__vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_load_len(const float *__ptr, unsigned int __len) {
+  return (__vector float)__builtin_s390_vll(__len, __ptr);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_load_len(const double *__ptr, unsigned int __len) {
+  return (__vector double)__builtin_s390_vll(__len, __ptr);
+}
+
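+/* The length operand of vec_load_len is a byte count minus one: the
+ * underlying VECTOR LOAD WITH LENGTH (VLL) instruction transfers
+ * min(__len + 1, 16) bytes and zero-fills the rest of the vector. A
+ * sketch for loading the first n bytes of an assumed buffer buf
+ * (0 < n <= 16):
+ *
+ *   __vector unsigned char v = vec_load_len(buf, n - 1);
+ */
+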
+/*-- vec_load_len_r ---------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector unsigned char
+vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
+  return (__vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
+}
+#endif
+
+/*-- vec_store_len ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed char __vec, signed char *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned char __vec, unsigned char *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed short __vec, signed short *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned short __vec, unsigned short *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed int __vec, signed int *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned int __vec, unsigned int *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed long long __vec, signed long long *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned long long __vec, unsigned long long *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai void
+vec_store_len(__vector float __vec, float *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+#endif
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector double __vec, double *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
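+/* vec_store_len is the mirror image: min(__len + 1, 16) bytes of __vec
+ * are stored and memory past the last stored byte is left untouched,
+ * which is the usual way to write a sub-16-byte loop tail. A sketch
+ * with assumed names (0 < n <= 16):
+ *
+ *   vec_store_len(v, buf, n - 1);
+ */
+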
+/*-- vec_store_len_r --------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai void
+vec_store_len_r(__vector unsigned char __vec, unsigned char *__ptr,
+                unsigned int __len) {
+  __builtin_s390_vstrl((__vector signed char)__vec, __len, __ptr);
+}
+#endif
+
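+/* The _r variants above are only available from arch12 (z14) on; they
+ * use the VLRL/VSTRL instructions, which transfer the rightmost bytes
+ * of the vector register rather than the leftmost, matching the
+ * right-aligned layout of packed-decimal data. */
+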
+/*-- vec_load_pair ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_load_pair(signed long long __a, signed long long __b) {
+  return (__vector signed long long)(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_load_pair(unsigned long long __a, unsigned long long __b) {
+  return (__vector unsigned long long)(__a, __b);
+}
+
+/*-- vec_genmask ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_genmask(unsigned short __mask)
+  __constant(__mask) {
+  return (__vector unsigned char)(
+    __mask & 0x8000 ? 0xff : 0,
+    __mask & 0x4000 ? 0xff : 0,
+    __mask & 0x2000 ? 0xff : 0,
+    __mask & 0x1000 ? 0xff : 0,
+    __mask & 0x0800 ? 0xff : 0,
+    __mask & 0x0400 ? 0xff : 0,
+    __mask & 0x0200 ? 0xff : 0,
+    __mask & 0x0100 ? 0xff : 0,
+    __mask & 0x0080 ? 0xff : 0,
+    __mask & 0x0040 ? 0xff : 0,
+    __mask & 0x0020 ? 0xff : 0,
+    __mask & 0x0010 ? 0xff : 0,
+    __mask & 0x0008 ? 0xff : 0,
+    __mask & 0x0004 ? 0xff : 0,
+    __mask & 0x0002 ? 0xff : 0,
+    __mask & 0x0001 ? 0xff : 0);
+}
+
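+/* Bit 15 (the most significant bit) of __mask selects byte 0 of the
+ * result and bit 0 selects byte 15, each selected byte becoming 0xff
+ * and the rest 0x00. For example:
+ *
+ *   __vector unsigned char m = vec_genmask(0x8001);
+ *   yields { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff }
+ */
+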
+/*-- vec_genmasks_* ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_genmasks_8(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 7;
+  unsigned char __bit2 = __last & 7;
+  unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
+  unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
+  unsigned char __value = (__bit1 <= __bit2 ?
+                           __mask1 & ~__mask2 :
+                           __mask1 | ~__mask2);
+  return (__vector unsigned char)__value;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_genmasks_16(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 15;
+  unsigned char __bit2 = __last & 15;
+  unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
+  unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
+  unsigned short __value = (__bit1 <= __bit2 ?
+                            __mask1 & ~__mask2 :
+                            __mask1 | ~__mask2);
+  return (__vector unsigned short)__value;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_genmasks_32(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 31;
+  unsigned char __bit2 = __last & 31;
+  unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
+  unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
+  unsigned int __value = (__bit1 <= __bit2 ?
+                          __mask1 & ~__mask2 :
+                          __mask1 | ~__mask2);
+  return (__vector unsigned int)__value;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_genmasks_64(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 63;
+  unsigned char __bit2 = __last & 63;
+  unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
+  unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
+  unsigned long long __value = (__bit1 <= __bit2 ?
+                                __mask1 & ~__mask2 :
+                                __mask1 | ~__mask2);
+  return (__vector unsigned long long)__value;
+}
+
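+/* For the vec_genmasks_* family, bits are numbered from the most
+ * significant end (bit 0 is the MSB). The mask covering bits __first
+ * through __last inclusive is replicated into every element, and when
+ * __first > __last the selected range wraps around. For example:
+ *
+ *   vec_genmasks_32(8, 15)  yields 0x00ff0000 in every element
+ *   vec_genmasks_32(24, 7)  yields 0xff0000ff in every element
+ */
+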
+/*-- vec_splat --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_splat(__vector signed char __vec, int __index)
+  __constant_range(__index, 0, 15) {
+  return (__vector signed char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_splat(__vector __bool char __vec, int __index)
+  __constant_range(__index, 0, 15) {
+  return (__vector __bool char)(__vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_splat(__vector unsigned char __vec, int __index)
+  __constant_range(__index, 0, 15) {
+  return (__vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_splat(__vector signed short __vec, int __index)
+  __constant_range(__index, 0, 7) {
+  return (__vector signed short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_splat(__vector __bool short __vec, int __index)
+  __constant_range(__index, 0, 7) {
+  return (__vector __bool short)(__vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_splat(__vector unsigned short __vec, int __index)
+  __constant_range(__index, 0, 7) {
+  return (__vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_splat(__vector signed int __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector signed int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_splat(__vector __bool int __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector __bool int)(__vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_splat(__vector unsigned int __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_splat(__vector signed long long __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return (__vector signed long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_splat(__vector __bool long long __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return ((__vector __bool long long)
+          (__vector unsigned long long)__vec[__index]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_splat(__vector unsigned long long __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return (__vector unsigned long long)__vec[__index];
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_splat(__vector float __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector float)__vec[__index];
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_splat(__vector double __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return (__vector double)__vec[__index];
+}
+
+/*-- vec_splat_s* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector signed char
+vec_splat_s8(signed char __scalar)
+  __constant(__scalar) {
+  return (__vector signed char)__scalar;
+}
+
+static inline __ATTRS_ai __vector signed short
+vec_splat_s16(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector signed short)__scalar;
+}
+
+static inline __ATTRS_ai __vector signed int
+vec_splat_s32(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector signed int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai __vector signed long long
+vec_splat_s64(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector signed long long)(signed long)__scalar;
+}
+
+/*-- vec_splat_u* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_splat_u8(unsigned char __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_ai __vector unsigned short
+vec_splat_u16(unsigned short __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_ai __vector unsigned int
+vec_splat_u32(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai __vector unsigned long long
+vec_splat_u64(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned long long)(signed long long)__scalar;
+}
+
+/*-- vec_splats -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_splats(signed char __scalar) {
+  return (__vector signed char)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_splats(unsigned char __scalar) {
+  return (__vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_splats(signed short __scalar) {
+  return (__vector signed short)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_splats(unsigned short __scalar) {
+  return (__vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_splats(signed int __scalar) {
+  return (__vector signed int)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_splats(unsigned int __scalar) {
+  return (__vector unsigned int)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_splats(signed long long __scalar) {
+  return (__vector signed long long)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_splats(unsigned long long __scalar) {
+  return (__vector unsigned long long)__scalar;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_splats(float __scalar) {
+  return (__vector float)__scalar;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_splats(double __scalar) {
+  return (__vector double)__scalar;
+}
+
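+/* Of the splat families above: vec_splat replicates one element of an
+ * existing vector (the index must be a constant), vec_splat_sN and
+ * vec_splat_uN require a constant scalar that fits a 16-bit immediate
+ * (hence the signed short parameters of the 32- and 64-bit variants),
+ * and vec_splats accepts an arbitrary runtime scalar. A sketch, with x
+ * assumed:
+ *
+ *   __vector signed int v = vec_splats(x);     x in every element
+ *   __vector signed int w = vec_splat(v, 2);   v[2] in every element
+ */
+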
+/*-- vec_extend_s64 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed char __a) {
+  return (__vector signed long long)(__a[7], __a[15]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed short __a) {
+  return (__vector signed long long)(__a[3], __a[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed int __a) {
+  return (__vector signed long long)(__a[1], __a[3]);
+}
+
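+/* vec_extend_s64 sign-extends the last sub-element of each doubleword:
+ * with the big-endian element numbering used here, elements 7 and 15 of
+ * a char vector (or 3 and 7 of a short vector, or 1 and 3 of an int
+ * vector) are the least significant bytes/halfwords/words of the two
+ * 64-bit halves. */
+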
+/*-- vec_mergeh -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mergeh(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergeh(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergeh(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector unsigned char)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mergeh(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergeh(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergeh(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mergeh(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergeh(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergeh(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergeh(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergeh(__vector __bool long long __a, __vector __bool long long __b) {
+  return (__vector __bool long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergeh(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)(__a[0], __b[0]);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_mergeh(__vector float __a, __vector float __b) {
+  return (__vector float)(__a[0], __b[0], __a[1], __b[1]);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_mergeh(__vector double __a, __vector double __b) {
+  return (__vector double)(__a[0], __b[0]);
+}
+
+/*-- vec_mergel -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mergel(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)(
+    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergel(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)(
+    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergel(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector unsigned char)(
+    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mergel(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)(
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergel(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)(
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergel(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)(
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mergel(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergel(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergel(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergel(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergel(__vector __bool long long __a, __vector __bool long long __b) {
+  return (__vector __bool long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergel(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)(__a[1], __b[1]);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_mergel(__vector float __a, __vector float __b) {
+  return (__vector float)(__a[2], __b[2], __a[3], __b[3]);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_mergel(__vector double __a, __vector double __b) {
+  return (__vector double)(__a[1], __b[1]);
+}
+
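+/* vec_mergeh interleaves the high (left) halves of its operands and
+ * vec_mergel the low (right) halves; for 4-element ints:
+ *
+ *   vec_mergeh(a, b) == { a[0], b[0], a[1], b[1] }
+ *   vec_mergel(a, b) == { a[2], b[2], a[3], b[3] }
+ */
+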
+/*-- vec_pack ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_pack(__vector signed short __a, __vector signed short __b) {
+  __vector signed char __ac = (__vector signed char)__a;
+  __vector signed char __bc = (__vector signed char)__b;
+  return (__vector signed char)(
+    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_pack(__vector __bool short __a, __vector __bool short __b) {
+  __vector __bool char __ac = (__vector __bool char)__a;
+  __vector __bool char __bc = (__vector __bool char)__b;
+  return (__vector __bool char)(
+    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_pack(__vector unsigned short __a, __vector unsigned short __b) {
+  __vector unsigned char __ac = (__vector unsigned char)__a;
+  __vector unsigned char __bc = (__vector unsigned char)__b;
+  return (__vector unsigned char)(
+    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_pack(__vector signed int __a, __vector signed int __b) {
+  __vector signed short __ac = (__vector signed short)__a;
+  __vector signed short __bc = (__vector signed short)__b;
+  return (__vector signed short)(
+    __ac[1], __ac[3], __ac[5], __ac[7],
+    __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_pack(__vector __bool int __a, __vector __bool int __b) {
+  __vector __bool short __ac = (__vector __bool short)__a;
+  __vector __bool short __bc = (__vector __bool short)__b;
+  return (__vector __bool short)(
+    __ac[1], __ac[3], __ac[5], __ac[7],
+    __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_pack(__vector unsigned int __a, __vector unsigned int __b) {
+  __vector unsigned short __ac = (__vector unsigned short)__a;
+  __vector unsigned short __bc = (__vector unsigned short)__b;
+  return (__vector unsigned short)(
+    __ac[1], __ac[3], __ac[5], __ac[7],
+    __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_pack(__vector signed long long __a, __vector signed long long __b) {
+  __vector signed int __ac = (__vector signed int)__a;
+  __vector signed int __bc = (__vector signed int)__b;
+  return (__vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_pack(__vector __bool long long __a, __vector __bool long long __b) {
+  __vector __bool int __ac = (__vector __bool int)__a;
+  __vector __bool int __bc = (__vector __bool int)__b;
+  return (__vector __bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_pack(__vector unsigned long long __a, __vector unsigned long long __b) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  return (__vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
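+/* vec_pack performs modular truncation, keeping only the low half of
+ * each source element, whereas vec_packs and vec_packsu below saturate
+ * to the signed respectively unsigned range. For example:
+ *
+ *   __vector signed short a = vec_splats((signed short)0x1234);
+ *   vec_pack(a, a)   gives 0x34 in every byte (low half kept)
+ *   vec_packs(a, a)  gives 0x7f in every byte (saturated to SCHAR_MAX)
+ */
+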
+/*-- vec_packs --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_packs(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vpksh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_packs(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vpksf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_packs(__vector signed long long __a, __vector signed long long __b) {
+  return __builtin_s390_vpksg(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packs_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_packs_cc(__vector signed short __a, __vector signed short __b, int *__cc) {
+  return __builtin_s390_vpkshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs_cc(__vector unsigned short __a, __vector unsigned short __b,
+             int *__cc) {
+  return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_packs_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+  return __builtin_s390_vpksfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_packs_cc(__vector signed long long __a, __vector signed long long __b,
+             int *__cc) {
+  return __builtin_s390_vpksgs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs_cc(__vector unsigned long long __a, __vector unsigned long long __b,
+             int *__cc) {
+  return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_packsu -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector signed short __a, __vector signed short __b) {
+  const __vector signed short __zero = (__vector signed short)0;
+  return __builtin_s390_vpklsh(
+    (__vector unsigned short)(__a >= __zero) & (__vector unsigned short)__a,
+    (__vector unsigned short)(__b >= __zero) & (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector signed int __a, __vector signed int __b) {
+  const __vector signed int __zero = (__vector signed int)0;
+  return __builtin_s390_vpklsf(
+    (__vector unsigned int)(__a >= __zero) & (__vector unsigned int)__a,
+    (__vector unsigned int)(__b >= __zero) & (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector signed long long __a, __vector signed long long __b) {
+  const __vector signed long long __zero = (__vector signed long long)0;
+  return __builtin_s390_vpklsg(
+    (__vector unsigned long long)(__a >= __zero) &
+    (__vector unsigned long long)__a,
+    (__vector unsigned long long)(__b >= __zero) &
+    (__vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packsu_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu_cc(__vector unsigned short __a, __vector unsigned short __b,
+              int *__cc) {
+  return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu_cc(__vector unsigned long long __a, __vector unsigned long long __b,
+              int *__cc) {
+  return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
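+/* The _cc variants of the saturating packs additionally return the
+ * instruction's condition code through *__cc; a value of 0 means no
+ * element saturated. A sketch, with a and b assumed:
+ *
+ *   int cc;
+ *   __vector signed char r = vec_packs_cc(a, b, &cc);
+ *   if (cc == 0)
+ *     ... every element fit without clamping ...
+ */
+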
+/*-- vec_unpackh ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackh(__vector signed char __a) {
+  return __builtin_s390_vuphb(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackh(__vector __bool char __a) {
+  return ((__vector __bool short)
+          __builtin_s390_vuphb((__vector signed char)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackh(__vector unsigned char __a) {
+  return __builtin_s390_vuplhb(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackh(__vector signed short __a) {
+  return __builtin_s390_vuphh(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackh(__vector __bool short __a) {
+  return (__vector __bool int)__builtin_s390_vuphh((__vector signed short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackh(__vector unsigned short __a) {
+  return __builtin_s390_vuplhh(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackh(__vector signed int __a) {
+  return __builtin_s390_vuphf(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackh(__vector __bool int __a) {
+  return ((__vector __bool long long)
+          __builtin_s390_vuphf((__vector signed int)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackh(__vector unsigned int __a) {
+  return __builtin_s390_vuplhf(__a);
+}
+
+/*-- vec_unpackl ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackl(__vector signed char __a) {
+  return __builtin_s390_vuplb(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackl(__vector __bool char __a) {
+  return ((__vector __bool short)
+          __builtin_s390_vuplb((__vector signed char)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackl(__vector unsigned char __a) {
+  return __builtin_s390_vupllb(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackl(__vector signed short __a) {
+  return __builtin_s390_vuplhw(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackl(__vector __bool short __a) {
+  return ((__vector __bool int)
+          __builtin_s390_vuplhw((__vector signed short)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackl(__vector unsigned short __a) {
+  return __builtin_s390_vupllh(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackl(__vector signed int __a) {
+  return __builtin_s390_vuplf(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackl(__vector __bool int __a) {
+  return ((__vector __bool long long)
+          __builtin_s390_vuplf((__vector signed int)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackl(__vector unsigned int __a) {
+  return __builtin_s390_vupllf(__a);
+}
+
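+/* The unpack routines widen half of the input's elements: vec_unpackh
+ * takes the high (left) half and vec_unpackl the low (right) half,
+ * sign-extending signed and __bool inputs and zero-extending unsigned
+ * ones. For a signed char vector c:
+ *
+ *   vec_unpackh(c) == { (short)c[0], ..., (short)c[7] }
+ *   vec_unpackl(c) == { (short)c[8], ..., (short)c[15] }
+ */
+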
+/*-- vec_cmpeq --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector __bool long long __a, __vector __bool long long __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a == __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+/*-- vec_cmpge --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a >= __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a >= __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a >= __b);
+}
+
+/*-- vec_cmpgt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a > __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a > __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a > __b);
+}
+
+/*-- vec_cmple --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a <= __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a <= __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a <= __b);
+}
+
+/*-- vec_cmplt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a < __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a < __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a < __b);
+}
+
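+/* All of the vec_cmp* routines above yield per-element masks, all-ones
+ * where the predicate holds and all-zeros where it does not, which
+ * combine naturally with vec_sel (defined earlier in this header). A
+ * sketch of an elementwise maximum, with a and b assumed:
+ *
+ *   __vector __bool int m = vec_cmpgt(a, b);
+ *   __vector signed int max = vec_sel(b, a, m);
+ *
+ * The float overloads are guarded by __ARCH__ >= 12 because
+ * single-precision vector support arrived with arch12 (z14).
+ */
+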
+/*-- vec_all_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
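+/* vec_all_eq and vec_all_ne fold an elementwise comparison into a
+ * single int: the VCEQ*S builtins set the condition code to 0 when
+ * every element compares equal and to 3 when none does, so vec_all_eq
+ * tests __cc == 0 while vec_all_ne tests __cc == 3. */
+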
+/*-- vec_all_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc == 3;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
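+// Editorial note (not part of the upstream header): there is no integer
+// "compare greater-or-equal" builtin, so vec_all_ge swaps the operands
+// into the greater-than compare and tests cc == 3, i.e. a >= b holds for
+// all elements iff b > a holds for none.  The floating-point overloads
+// have the direct vfchesbs/vfchedbs (>=) compares and test cc == 0.
+#if 0  /* Usage sketch only; all_within_floor is a hypothetical helper. */
+static inline int all_within_floor(__vector unsigned int values,
+                                   __vector unsigned int floors) {
+  // True when no lane has dropped below its floor.
+  return vec_all_ge(values, floors);
+}
+#endif
+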
+/*-- vec_all_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc == 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc == 3;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_le(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc == 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_nge ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_ngt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_ngt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_ngt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_nle ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nle(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nle(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_nlt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nlt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nlt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
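+// Editorial note (not part of the upstream header): the negated
+// floating-point predicates above reuse the same compare builtin as their
+// positive counterparts but test cc == 3 ("true for no element") instead
+// of cc == 0.  With NaNs in play the two are not complements: a mixed
+// outcome makes vec_all_ge(a, b) and vec_all_nge(a, b) both return 0.
+#if 0  /* Usage sketch only; lanes_disagree is a hypothetical helper. */
+static inline int lanes_disagree(__vector double a, __vector double b) {
+  // Neither all->= nor all-not->=: the lanes gave a mixed result, e.g.
+  // because some lane compared unordered (NaN).
+  return !vec_all_ge(a, b) && !vec_all_nge(a, b);
+}
+#endif
+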
+/*-- vec_all_nan ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nan(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nan(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_numeric --------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_numeric(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_numeric(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc == 3;
+}
+
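+// Editorial note (not part of the upstream header): both predicates above
+// issue the same test-data-class instruction with mask 15, which selects
+// the four NaN classes (quiet/signaling, either sign); vec_all_nan then
+// tests cc == 0 (every element is in the class) and vec_all_numeric tests
+// cc == 3 (no element is).
+#if 0  /* Usage sketch only; safe_to_reduce is a hypothetical helper. */
+static inline int safe_to_reduce(__vector double acc) {
+  // Guard a reduction: proceed only when no lane has gone to NaN.
+  return vec_all_numeric(acc);
+}
+#endif
+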
+/*-- vec_any_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
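+// Editorial note (not part of the upstream header): the vec_any_* forms
+// reuse the same compare builtins but accept a mixed outcome -- for the
+// equality compares cc 0 means every lane matched and cc 1 means only
+// some did, so "any equal" is cc <= 1, while "any not equal" is simply
+// cc != 0 (anything other than all-equal).
+#if 0  /* Usage sketch only; needle_present is a hypothetical helper. */
+static inline int needle_present(__vector unsigned char haystack,
+                                 __vector unsigned char needle) {
+  // True when at least one byte lane matches.
+  return vec_any_eq(haystack, needle);
+}
+#endif
+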
+/*-- vec_any_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc != 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
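+
+// Informal note (not from the upstream header): the vch*s/vchl*s
+// compare builtins set the condition code to 0 if the comparison is
+// true for all elements, 1 if it is true for some but not all, and 3
+// if it is true for none.  There is no integer >= compare, so
+// vec_any_ge(a, b) issues the swapped strict compare b > a and tests
+// __cc != 0 ("not all b > a"); the float/double variants can use the
+// direct >= compare (vfche*bs) and test __cc <= 1.  Usage sketch with
+// hypothetical values:
+//
+//   __vector signed int x = {1, 2, 3, 4};
+//   __vector signed int y = {5, 0, 5, 5};
+//   int r = vec_any_ge(x, y);  // 1, since 2 >= 0 in element 1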
+
+/*-- vec_any_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
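+
+// Informal note: the strict compare has a direct instruction, so
+// vec_any_gt(a, b) issues vch*s(a, b) and tests __cc <= 1, i.e. the
+// comparison held for at least one element.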
+
+/*-- vec_any_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc != 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_le(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
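+
+// Informal note: as with vec_any_ge, the integer variants derive
+// "any a <= b" as "not all a > b" (__cc != 0 on vch*s(a, b)), while
+// the float/double variants test the swapped >= compare directly.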
+
+/*-- vec_any_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc <= 1;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
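+
+// Informal note: "any a < b" is just "any b > a", so these variants
+// reuse the strict compare with swapped operands and test __cc <= 1.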
+
+/*-- vec_any_nge ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_ngt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_ngt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_ngt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_nle ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nle(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nle(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_nlt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nlt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nlt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc != 0;
+}
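+
+// Informal note: the vec_any_n* predicates exist only for float and
+// double, and they are not simply complements of the ordered
+// predicates once NaNs are involved: an unordered element pair fails
+// the underlying compare, so e.g. vec_any_nge(a, b) returns 1 if any
+// pair is unordered, whereas vec_any_lt(a, b) would not.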
+
+/*-- vec_any_nan ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nan(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc != 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nan(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc != 3;
+}
+
+/*-- vec_any_numeric --------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_numeric(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_numeric(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc != 0;
+}
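+
+// Informal note: the vftci*b builtins test each element against a
+// data-class mask; __cc is 0 if all elements match the mask and 3 if
+// none do.  The mask 15 used here appears to select the four NaN
+// classes (quiet/signaling, either sign), so "any NaN" is __cc != 3
+// and "any numeric" (any non-NaN element) is __cc != 0.  Sketch:
+//
+//   __vector double v = {1.0, __builtin_nan("")};
+//   vec_any_nan(v);      // 1
+//   vec_any_numeric(v);  // 1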
+
+/*-- vec_andc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_andc(__vector __bool char __a, __vector __bool char __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector signed char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector __bool char __a, __vector signed char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector __bool char __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector __bool char __a, __vector unsigned char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector __bool char __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_andc(__vector __bool short __a, __vector __bool short __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector signed short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector __bool short __a, __vector signed short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector __bool short __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector __bool short __a, __vector unsigned short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector __bool short __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_andc(__vector __bool int __a, __vector __bool int __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector signed int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector __bool int __a, __vector signed int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector __bool int __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector __bool int __a, __vector unsigned int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector __bool int __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_andc(__vector __bool long long __a, __vector __bool long long __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector signed long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector __bool long long __a, __vector signed long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector __bool long long __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector __bool long long __a, __vector unsigned long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector __bool long long __b) {
+  return __a & ~__b;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_andc(__vector float __a, __vector float __b) {
+  return (__vector float)((__vector unsigned int)__a &
+                         ~(__vector unsigned int)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector double __b) {
+  return (__vector double)((__vector unsigned long long)__a &
+                         ~(__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector __bool long long __a, __vector double __b) {
+  return (__vector double)((__vector unsigned long long)__a &
+                         ~(__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector __bool long long __b) {
+  return (__vector double)((__vector unsigned long long)__a &
+                         ~(__vector unsigned long long)__b);
+}
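+
+// Usage sketch for vec_andc (hypothetical values): clear in __a every
+// bit that is set in __b, e.g. to strip sign bits:
+//
+//   __vector unsigned int v = {0x80000001, 0x7fffffff, 0, 0x80000000};
+//   __vector unsigned int sign = vec_splats(0x80000000u);
+//   __vector unsigned int m = vec_andc(v, sign);
+//   // m == {0x00000001, 0x7fffffff, 0, 0}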
+
+/*-- vec_nor ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_nor(__vector __bool char __a, __vector __bool char __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector signed char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector __bool char __a, __vector signed char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector __bool char __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector unsigned char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector __bool char __a, __vector unsigned char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector __bool char __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_nor(__vector __bool short __a, __vector __bool short __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector signed short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector __bool short __a, __vector signed short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector __bool short __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector unsigned short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector __bool short __a, __vector unsigned short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector __bool short __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_nor(__vector __bool int __a, __vector __bool int __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector signed int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector __bool int __a, __vector signed int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector __bool int __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector unsigned int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector __bool int __a, __vector unsigned int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector __bool int __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nor(__vector __bool long long __a, __vector __bool long long __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector signed long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector __bool long long __a, __vector signed long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector __bool long long __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector __bool long long __a, __vector unsigned long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector __bool long long __b) {
+  return ~(__a | __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nor(__vector float __a, __vector float __b) {
+  return (__vector float)~((__vector unsigned int)__a |
+                         (__vector unsigned int)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a |
+                          (__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector __bool long long __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a |
+                          (__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector __bool long long __b) {
+  return (__vector double)~((__vector unsigned long long)__a |
+                          (__vector unsigned long long)__b);
+}
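+
+// Usage sketch for vec_nor: with both operands equal it acts as a
+// bitwise NOT, e.g. to invert a compare mask (x, y hypothetical
+// integer vectors):
+//
+//   __vector __bool int m = vec_cmpeq(x, y);
+//   __vector __bool int not_m = vec_nor(m, m);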
+
+/*-- vec_orc ----------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool char
+vec_orc(__vector __bool char __a, __vector __bool char __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_orc(__vector signed char __a, __vector signed char __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_orc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_orc(__vector __bool short __a, __vector __bool short __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_orc(__vector signed short __a, __vector signed short __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_orc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_orc(__vector __bool int __a, __vector __bool int __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_orc(__vector signed int __a, __vector signed int __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_orc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_orc(__vector __bool long long __a, __vector __bool long long __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_orc(__vector signed long long __a, __vector signed long long __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_orc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_orc(__vector float __a, __vector float __b) {
+  return (__vector float)((__vector unsigned int)__a |
+                        ~(__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_orc(__vector double __a, __vector double __b) {
+  return (__vector double)((__vector unsigned long long)__a |
+                         ~(__vector unsigned long long)__b);
+}
+#endif
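+
+// Informal note: vec_orc (OR with complement) and the vec_nand and
+// vec_eqv families below are only available from arch12 on
+// (presumably the z14 vector enhancements), hence the __ARCH__
+// guards; on older targets the overloaded vector operators give the
+// same result, e.g. __a | ~__b in place of vec_orc(__a, __b).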
+
+/*-- vec_nand ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool char
+vec_nand(__vector __bool char __a, __vector __bool char __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_nand(__vector signed char __a, __vector signed char __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nand(__vector unsigned char __a, __vector unsigned char __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_nand(__vector __bool short __a, __vector __bool short __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_nand(__vector signed short __a, __vector signed short __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nand(__vector unsigned short __a, __vector unsigned short __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_nand(__vector __bool int __a, __vector __bool int __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_nand(__vector signed int __a, __vector signed int __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nand(__vector unsigned int __a, __vector unsigned int __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nand(__vector __bool long long __a, __vector __bool long long __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_nand(__vector signed long long __a, __vector signed long long __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nand(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_nand(__vector float __a, __vector float __b) {
+  return (__vector float)~((__vector unsigned int)__a &
+                         (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_nand(__vector double __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a &
+                          (__vector unsigned long long)__b);
+}
+#endif
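+
+// Usage sketch: vec_nand computes ~(__a & __b); by De Morgan this
+// equals ~__a | ~__b, and vec_nand(a, a) is another spelling of
+// bitwise NOT.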
+
+/*-- vec_eqv ----------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool char
+vec_eqv(__vector __bool char __a, __vector __bool char __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_eqv(__vector signed char __a, __vector signed char __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_eqv(__vector unsigned char __a, __vector unsigned char __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_eqv(__vector __bool short __a, __vector __bool short __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_eqv(__vector signed short __a, __vector signed short __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_eqv(__vector unsigned short __a, __vector unsigned short __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_eqv(__vector __bool int __a, __vector __bool int __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_eqv(__vector signed int __a, __vector signed int __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_eqv(__vector unsigned int __a, __vector unsigned int __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_eqv(__vector __bool long long __a, __vector __bool long long __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_eqv(__vector signed long long __a, __vector signed long long __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_eqv(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_eqv(__vector float __a, __vector float __b) {
+  return (__vector float)~((__vector unsigned int)__a ^
+                         (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_eqv(__vector double __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a ^
+                          (__vector unsigned long long)__b);
+}
+#endif
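+
+// Informal note: vec_eqv is bitwise XNOR; the result has a one bit
+// wherever the two operands agree, so vec_eqv(a, a) yields all ones.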
+
+/*-- vec_cntlz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector signed char __a) {
+  return __builtin_s390_vclzb((__vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector unsigned char __a) {
+  return __builtin_s390_vclzb(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector signed short __a) {
+  return __builtin_s390_vclzh((__vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector unsigned short __a) {
+  return __builtin_s390_vclzh(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector signed int __a) {
+  return __builtin_s390_vclzf((__vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector unsigned int __a) {
+  return __builtin_s390_vclzf(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector signed long long __a) {
+  return __builtin_s390_vclzg((__vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector unsigned long long __a) {
+  return __builtin_s390_vclzg(__a);
+}
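+
+// Usage sketch for vec_cntlz: per-element count of leading zero bits;
+// signed inputs are simply bit-cast, and the result is always the
+// unsigned type of the same width.  A zero element should count the
+// full element width:
+//
+//   __vector unsigned int v = {0, 1, 0x80000000, 0xffff};
+//   vec_cntlz(v);  // {32, 31, 0, 16}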
+
+/*-- vec_cnttz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector signed char __a) {
+  return __builtin_s390_vctzb((__vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector unsigned char __a) {
+  return __builtin_s390_vctzb(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector signed short __a) {
+  return __builtin_s390_vctzh((__vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector unsigned short __a) {
+  return __builtin_s390_vctzh(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector signed int __a) {
+  return __builtin_s390_vctzf((__vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector unsigned int __a) {
+  return __builtin_s390_vctzf(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector signed long long __a) {
+  return __builtin_s390_vctzg((__vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector unsigned long long __a) {
+  return __builtin_s390_vctzg(__a);
+}
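+
+// Informal note: vec_cnttz mirrors vec_cntlz, counting trailing zero
+// bits per element (vctz* instead of vclz*).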
+
+/*-- vec_popcnt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector signed char __a) {
+  return __builtin_s390_vpopctb((__vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector unsigned char __a) {
+  return __builtin_s390_vpopctb(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector signed short __a) {
+  return __builtin_s390_vpopcth((__vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector unsigned short __a) {
+  return __builtin_s390_vpopcth(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector signed int __a) {
+  return __builtin_s390_vpopctf((__vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector unsigned int __a) {
+  return __builtin_s390_vpopctf(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector signed long long __a) {
+  return __builtin_s390_vpopctg((__vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector unsigned long long __a) {
+  return __builtin_s390_vpopctg(__a);
+}
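+
+// Usage sketch for vec_popcnt: per-element population count, with the
+// result in the unsigned type of the same element width:
+//
+//   __vector unsigned char b = {0xff, 0x0f, 0x01, 0x00};
+//   vec_popcnt(b);  // {8, 4, 1, 0, 0, ...} (remaining elements 0)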
+
+/*-- vec_rl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_rl(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_verllvb(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rl(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_verllvb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_rl(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_verllvh(
+    (__vector unsigned short)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rl(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_verllvh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_rl(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_verllvf(
+    (__vector unsigned int)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rl(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_verllvf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_rl(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_verllvg(
+    (__vector unsigned long long)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rl(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_verllvg(__a, __b);
+}
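+
+// Usage sketch for vec_rl: each element of __a is rotated left by the
+// corresponding element of __b (hypothetical values):
+//
+//   __vector unsigned int v = vec_splats(0x80000001u);
+//   __vector unsigned int n = {1, 4, 8, 31};
+//   vec_rl(v, n);  // {0x00000003, 0x00000018, 0x00000180, 0xc0000000}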
+
+/*-- vec_rli ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_rli(__vector signed char __a, unsigned long __b) {
+  return (__vector signed char)__builtin_s390_verllb(
+    (__vector unsigned char)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rli(__vector unsigned char __a, unsigned long __b) {
+  return __builtin_s390_verllb(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_rli(__vector signed short __a, unsigned long __b) {
+  return (__vector signed short)__builtin_s390_verllh(
+    (__vector unsigned short)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rli(__vector unsigned short __a, unsigned long __b) {
+  return __builtin_s390_verllh(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_rli(__vector signed int __a, unsigned long __b) {
+  return (__vector signed int)__builtin_s390_verllf(
+    (__vector unsigned int)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rli(__vector unsigned int __a, unsigned long __b) {
+  return __builtin_s390_verllf(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_rli(__vector signed long long __a, unsigned long __b) {
+  return (__vector signed long long)__builtin_s390_verllg(
+    (__vector unsigned long long)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rli(__vector unsigned long long __a, unsigned long __b) {
+  return __builtin_s390_verllg(__a, (int)__b);
+}
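+
+// Informal note: vec_rli is the scalar-count form of vec_rl; the same
+// rotate amount __b is applied to every element, truncated to int for
+// the underlying verll* builtin.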
+
+/*-- vec_rl_mask ------------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_rl_mask(__vector signed char __a, __vector unsigned char __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned char
+vec_rl_mask(__vector unsigned char __a, __vector unsigned char __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector signed short
+vec_rl_mask(__vector signed short __a, __vector unsigned short __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned short
+vec_rl_mask(__vector unsigned short __a, __vector unsigned short __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector signed int
+vec_rl_mask(__vector signed int __a, __vector unsigned int __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned int
+vec_rl_mask(__vector unsigned int __a, __vector unsigned int __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector signed long long
+vec_rl_mask(__vector signed long long __a, __vector unsigned long long __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned long long
+vec_rl_mask(__vector unsigned long long __a, __vector unsigned long long __b,
+            unsigned char __c) __constant(__c);
+
+#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
+  __extension__ ({ \
+    __vector unsigned char __res; \
+    __vector unsigned char __x = (__vector unsigned char)(X); \
+    __vector unsigned char __y = (__vector unsigned char)(Y); \
+    switch (sizeof ((X)[0])) { \
+    case 1: __res = (__vector unsigned char) __builtin_s390_verimb( \
+             (__vector unsigned char)__x, (__vector unsigned char)__x, \
+             (__vector unsigned char)__y, (Z)); break; \
+    case 2: __res = (__vector unsigned char) __builtin_s390_verimh( \
+             (__vector unsigned short)__x, (__vector unsigned short)__x, \
+             (__vector unsigned short)__y, (Z)); break; \
+    case 4: __res = (__vector unsigned char) __builtin_s390_verimf( \
+             (__vector unsigned int)__x, (__vector unsigned int)__x, \
+             (__vector unsigned int)__y, (Z)); break; \
+    default: __res = (__vector unsigned char) __builtin_s390_verimg( \
+             (__vector unsigned long long)__x, (__vector unsigned long long)__x, \
+             (__vector unsigned long long)__y, (Z)); break; \
+    } __res; }))
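+
+// Informal note on the pattern above: the extern __ATTRS_o prototypes
+// exist so that overload resolution and the __typeof__ in the macro
+// can pick the correct result type, but the shadowing macro does the
+// real work.  The rotate-and-insert builtins (verim*) take their last
+// operand as an immediate, so __c must be a compile-time constant;
+// a true function parameter could not guarantee that, and the
+// __constant(__c) annotation on the prototypes also enforces it.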
+
+/*-- vec_sll ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned short __b) {
+  return (__vector signed char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned int __b) {
+  return (__vector signed char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned short __b) {
+  return (__vector __bool char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned int __b) {
+  return (__vector __bool char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsl(__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned short __b) {
+  return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned int __b) {
+  return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned char __b) {
+  return (__vector signed short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned int __b) {
+  return (__vector signed short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned char __b) {
+  return (__vector __bool short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned int __b) {
+  return (__vector __bool short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned char __b) {
+  return (__vector unsigned short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned int __b) {
+  return (__vector unsigned short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned char __b) {
+  return (__vector signed int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned short __b) {
+  return (__vector signed int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned char __b) {
+  return (__vector __bool int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned short __b) {
+  return (__vector __bool int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned char __b) {
+  return (__vector unsigned int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned short __b) {
+  return (__vector unsigned int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned char __b) {
+  return (__vector signed long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned short __b) {
+  return (__vector signed long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned int __b) {
+  return (__vector signed long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned char __b) {
+  return (__vector __bool long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned short __b) {
+  return (__vector __bool long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned int __b) {
+  return (__vector __bool long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned char __b) {
+  return (__vector unsigned long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned short __b) {
+  return (__vector unsigned long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned int __b) {
+  return (__vector unsigned long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
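+// Usage sketch (an illustrative assumption, not part of this header):
+// the vec_sll forms shift the entire 128-bit register left by a small
+// bit count taken from the shift operand, all bytes of which should
+// hold the same value; shifts of 8 bits or more are composed with
+// vec_slb below.
+//
+//   __vector unsigned char __x = (__vector unsigned char)0x01;
+//   __vector unsigned char __n = (__vector unsigned char)3;
+//   __vector unsigned char __r = vec_sll(__x, __n);  // whole vector << 3
+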
+/*-- vec_slb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vslb(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector signed char __b) {
+  return __builtin_s390_vslb(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vslb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector signed short __b) {
+  return (__vector unsigned short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector signed int __b) {
+  return (__vector unsigned int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector signed long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector signed int __b) {
+  return (__vector float)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector unsigned int __b) {
+  return (__vector float)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector signed long long __b) {
+  return (__vector double)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector unsigned long long __b) {
+  return (__vector double)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
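+// Sketch (assumed idiom, mirroring other vector-shift families): since
+// vec_slb consumes a byte count and vec_sll the residual bit count, an
+// arbitrary 0..127-bit left shift is built by replicating the bit count
+// into every byte of the shift operand and applying both:
+//
+//   __vector unsigned char __n = (__vector unsigned char)19;
+//   __vector unsigned char __r = vec_sll(vec_slb(__x, __n), __n);
+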
+/*-- vec_sld ----------------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_sld(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool char
+vec_sld(__vector __bool char __a, __vector __bool char __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned char
+vec_sld(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector signed short
+vec_sld(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool short
+vec_sld(__vector __bool short __a, __vector __bool short __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned short
+vec_sld(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector signed int
+vec_sld(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool int
+vec_sld(__vector __bool int __a, __vector __bool int __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned int
+vec_sld(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector signed long long
+vec_sld(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool long long
+vec_sld(__vector __bool long long __a, __vector __bool long long __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned long long
+vec_sld(__vector unsigned long long __a, __vector unsigned long long __b,
+        int __c)
+  __constant_range(__c, 0, 15);
+
+#if __ARCH__ >= 12
+extern __ATTRS_o __vector float
+vec_sld(__vector float __a, __vector float __b, int __c)
+  __constant_range(__c, 0, 15);
+#endif
+
+extern __ATTRS_o __vector double
+vec_sld(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 15);
+
+#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
+  __builtin_s390_vsldb((__vector unsigned char)(X), \
+                       (__vector unsigned char)(Y), (Z)))
+
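+// Reading aid, based on the VSLDB instruction the macro expands to:
+// vec_sld returns 16 contiguous bytes of the 32-byte concatenation
+// (__a : __b), starting at the constant byte offset __c, e.g.:
+//
+//   // bytes 3..18 of (__hi : __lo):
+//   __vector unsigned char __r = vec_sld(__hi, __lo, 3);
+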
+/*-- vec_sldw ---------------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_sldw(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned char
+vec_sldw(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector signed short
+vec_sldw(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned short
+vec_sldw(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector signed int
+vec_sldw(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned int
+vec_sldw(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector signed long long
+vec_sldw(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned long long
+vec_sldw(__vector unsigned long long __a, __vector unsigned long long __b,
+         int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector double
+vec_sldw(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 3);
+
+#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
+  __builtin_s390_vsldb((__vector unsigned char)(X), \
+                       (__vector unsigned char)(Y), (Z) * 4))
+
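+// Same byte selection as vec_sld, with the offset given in 4-byte words
+// (note the (Z) * 4 in the macro): vec_sldw(__a, __b, 1) is equivalent
+// to vec_sld(__a, __b, 4).
+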
+/*-- vec_sldb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o __vector signed char
+vec_sldb(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned char
+vec_sldb(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed short
+vec_sldb(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned short
+vec_sldb(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed int
+vec_sldb(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned int
+vec_sldb(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed long long
+vec_sldb(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned long long
+vec_sldb(__vector unsigned long long __a, __vector unsigned long long __b,
+         int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector float
+vec_sldb(__vector float __a, __vector float __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector double
+vec_sldb(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 7);
+
+#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
+  __builtin_s390_vsld((__vector unsigned char)(X), \
+                      (__vector unsigned char)(Y), (Z)))
+
+#endif
+
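+// Sketch (arch13 only, per the guard above): vec_sldb is the
+// bit-granular counterpart of vec_sld; as understood from the VSLD
+// instruction, it keeps the leftmost 128 bits of (__a : __b) shifted
+// left by __c bits (0..7), i.e. a left funnel shift:
+//
+//   __vector unsigned char __r = vec_sldb(__hi, __lo, 5);
+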
+/*-- vec_sral ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned short __b) {
+  return (__vector signed char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned int __b) {
+  return (__vector signed char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned short __b) {
+  return (__vector __bool char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned int __b) {
+  return (__vector __bool char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsra(__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned short __b) {
+  return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned int __b) {
+  return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned char __b) {
+  return (__vector signed short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned int __b) {
+  return (__vector signed short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned char __b) {
+  return (__vector __bool short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned int __b) {
+  return (__vector __bool short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned char __b) {
+  return (__vector unsigned short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned int __b) {
+  return (__vector unsigned short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned char __b) {
+  return (__vector signed int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned short __b) {
+  return (__vector signed int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned char __b) {
+  return (__vector __bool int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned short __b) {
+  return (__vector __bool int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned char __b) {
+  return (__vector unsigned int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned short __b) {
+  return (__vector unsigned int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned char __b) {
+  return (__vector signed long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned short __b) {
+  return (__vector signed long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned int __b) {
+  return (__vector signed long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned char __b) {
+  return (__vector __bool long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned short __b) {
+  return (__vector __bool long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned int __b) {
+  return (__vector __bool long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned char __b) {
+  return (__vector unsigned long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned short __b) {
+  return (__vector unsigned long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned int __b) {
+  return (__vector unsigned long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
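+// Sketch (assumption): vec_sral is the arithmetic mirror of vec_sll.
+// Given some vector __v, the whole 128-bit value moves right and the
+// vacated positions are filled with copies of the leftmost (sign) bit:
+//
+//   __vector unsigned char __n = (__vector unsigned char)5;
+//   __vector signed int __r = vec_sral(__v, __n);  // sign-filled >> 5
+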
+/*-- vec_srab ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector signed char __b) {
+  return __builtin_s390_vsrab(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsrab(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector signed short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector signed int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector signed long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector signed int __b) {
+  return (__vector float)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector unsigned int __b) {
+  return (__vector float)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector signed long long __b) {
+  return (__vector double)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector unsigned long long __b) {
+  return (__vector double)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
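+// Byte-granular arithmetic right shift; the composition idiom from
+// vec_slb applies in mirror image (assumed, not documented here):
+// vec_sral(vec_srab(__v, __n), __n) gives a full sign-filled shift by
+// the bit count replicated in __n.
+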
+/*-- vec_srl ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned short __b) {
+  return (__vector signed char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned int __b) {
+  return (__vector signed char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned short __b) {
+  return (__vector __bool char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned int __b) {
+  return (__vector __bool char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsrl(__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned short __b) {
+  return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned int __b) {
+  return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned char __b) {
+  return (__vector signed short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned int __b) {
+  return (__vector signed short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned char __b) {
+  return (__vector __bool short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned int __b) {
+  return (__vector __bool short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned char __b) {
+  return (__vector unsigned short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned int __b) {
+  return (__vector unsigned short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned char __b) {
+  return (__vector signed int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned short __b) {
+  return (__vector signed int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned char __b) {
+  return (__vector __bool int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned short __b) {
+  return (__vector __bool int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned char __b) {
+  return (__vector unsigned int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned short __b) {
+  return (__vector unsigned int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned char __b) {
+  return (__vector signed long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned short __b) {
+  return (__vector signed long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned int __b) {
+  return (__vector signed long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned char __b) {
+  return (__vector __bool long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned short __b) {
+  return (__vector __bool long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned int __b) {
+  return (__vector __bool long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned char __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned short __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned int __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
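+// Logical mirror of vec_sral: the whole 128-bit value moves right with
+// zero fill, e.g. __r = vec_srl(__x, __n) for a zero-filled shift.
+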
+/*-- vec_srb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector signed char __b) {
+  return __builtin_s390_vsrlb(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsrlb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector signed short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector signed int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector signed long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector signed int __b) {
+  return (__vector float)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector unsigned int __b) {
+  return (__vector float)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector signed long long __b) {
+  return (__vector double)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector unsigned long long __b) {
+  return (__vector double)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
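+// Sketch: byte-granular logical right shift; a full 0..127-bit logical
+// shift composes as vec_srl(vec_srb(__x, __n), __n), as with the left
+// shifts above.
+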
+/*-- vec_srdb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o __vector signed char
+vec_srdb(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned char
+vec_srdb(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed short
+vec_srdb(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned short
+vec_srdb(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed int
+vec_srdb(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned int
+vec_srdb(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed long long
+vec_srdb(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned long long
+vec_srdb(__vector unsigned long long __a, __vector unsigned long long __b,
+         int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector float
+vec_srdb(__vector float __a, __vector float __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector double
+vec_srdb(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 7);
+
+#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
+  __builtin_s390_vsrd((__vector unsigned char)(X), \
+                      (__vector unsigned char)(Y), (Z)))
+
+#endif
+
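+// Sketch (arch13 only): the right funnel shift matching vec_sldb; as
+// understood from the VSRD instruction, it keeps the rightmost 128
+// bits of (__a : __b) shifted right by __c bits (0..7):
+//
+//   __vector unsigned char __r = vec_srdb(__hi, __lo, 5);
+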
+/*-- vec_abs ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_abs(__vector signed char __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed char)0));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_abs(__vector signed short __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed short)0));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_abs(__vector signed int __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed int)0));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_abs(__vector signed long long __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed long long)0));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_abs(__vector float __a) {
+  return __builtin_s390_vflpsb(__a);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_abs(__vector double __a) {
+  return __builtin_s390_vflpdb(__a);
+}
+
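+// The integer forms above are plain two's-complement compare-and-select,
+// so, as with any such abs, the most negative element value is returned
+// unchanged:
+//
+//   __vector signed int __v = {-3, 4, -5, (int)0x80000000};
+//   __vector signed int __r = vec_abs(__v);  // {3, 4, 5, 0x80000000}
+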
+/*-- vec_nabs ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nabs(__vector float __a) {
+  return __builtin_s390_vflnsb(__a);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_nabs(__vector double __a) {
+  return __builtin_s390_vflndb(__a);
+}
+
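+// "Load negative": the sign bit is forced on, so vec_nabs(__x) yields
+// -|__x| per element (including -0.0 for +0.0).
+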
+/*-- vec_max ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector signed char __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector __bool char __b) {
+  __vector signed char __bc = (__vector signed char)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector __bool char __a, __vector signed char __b) {
+  __vector signed char __ac = (__vector signed char)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector __bool char __b) {
+  __vector unsigned char __bc = (__vector unsigned char)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector __bool char __a, __vector unsigned char __b) {
+  __vector unsigned char __ac = (__vector unsigned char)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector signed short __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector __bool short __b) {
+  __vector signed short __bc = (__vector signed short)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector __bool short __a, __vector signed short __b) {
+  __vector signed short __ac = (__vector signed short)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector unsigned short __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector __bool short __b) {
+  __vector unsigned short __bc = (__vector unsigned short)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector __bool short __a, __vector unsigned short __b) {
+  __vector unsigned short __ac = (__vector unsigned short)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector signed int __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector __bool int __b) {
+  __vector signed int __bc = (__vector signed int)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector __bool int __a, __vector signed int __b) {
+  __vector signed int __ac = (__vector signed int)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector __bool int __b) {
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector __bool int __a, __vector unsigned int __b) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector signed long long __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector __bool long long __b) {
+  __vector signed long long __bc = (__vector signed long long)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector __bool long long __a, __vector signed long long __b) {
+  __vector signed long long __ac = (__vector signed long long)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector __bool long long __b) {
+  __vector unsigned long long __bc = (__vector unsigned long long)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector __bool long long __a, __vector unsigned long long __b) {
+  __vector unsigned long long __ac = (__vector unsigned long long)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_max(__vector float __a, __vector float __b) {
+  return __builtin_s390_vfmaxsb(__a, __b, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_max(__vector double __a, __vector double __b) {
+#if __ARCH__ >= 12
+  return __builtin_s390_vfmaxdb(__a, __b, 0);
+#else
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+#endif
+}
+
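+// The integer maxima are expressed as compare-and-select rather than a
+// dedicated builtin; a quick example:
+//
+//   __vector signed int __a = {1, 9, -4, 0};
+//   __vector signed int __b = {2, 3, -5, 0};
+//   __vector signed int __r = vec_max(__a, __b);  // {2, 9, -4, 0}
+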
+/*-- vec_min ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector signed char __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector __bool char __b) {
+  __vector signed char __bc = (__vector signed char)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector __bool char __a, __vector signed char __b) {
+  __vector signed char __ac = (__vector signed char)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector __bool char __b) {
+  __vector unsigned char __bc = (__vector unsigned char)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector __bool char __a, __vector unsigned char __b) {
+  __vector unsigned char __ac = (__vector unsigned char)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector signed short __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector __bool short __b) {
+  __vector signed short __bc = (__vector signed short)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector __bool short __a, __vector signed short __b) {
+  __vector signed short __ac = (__vector signed short)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector unsigned short __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector __bool short __b) {
+  __vector unsigned short __bc = (__vector unsigned short)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector __bool short __a, __vector unsigned short __b) {
+  __vector unsigned short __ac = (__vector unsigned short)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector signed int __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector __bool int __b) {
+  __vector signed int __bc = (__vector signed int)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector __bool int __a, __vector signed int __b) {
+  __vector signed int __ac = (__vector signed int)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector __bool int __b) {
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector __bool int __a, __vector unsigned int __b) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector signed long long __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector __bool long long __b) {
+  __vector signed long long __bc = (__vector signed long long)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector __bool long long __a, __vector signed long long __b) {
+  __vector signed long long __ac = (__vector signed long long)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector __bool long long __b) {
+  __vector unsigned long long __bc = (__vector unsigned long long)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector __bool long long __a, __vector unsigned long long __b) {
+  __vector unsigned long long __ac = (__vector unsigned long long)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_min(__vector float __a, __vector float __b) {
+  return __builtin_s390_vfminsb(__a, __b, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_min(__vector double __a, __vector double __b) {
+#if __ARCH__ >= 12
+  return __builtin_s390_vfmindb(__a, __b, 0);
+#else
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+#endif
+}
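+
+// Illustrative sketch (editor's addition, hypothetical helper name): for
+// the integer element types, vec_min is simply a compare-and-select, so
+// the helper below is equivalent to vec_min(__a, __b) lane by lane.
+static inline __ATTRS_ai __vector signed int
+__example_min_s32(__vector signed int __a, __vector signed int __b) {
+  // Take __b wherever __a > __b; otherwise keep __a.
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}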
+
+/*-- vec_add_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vaq(__a, __b);
+}
+
+/*-- vec_addc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_addc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vaccb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_addc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vacch(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_addc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vaccf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vaccg(__a, __b);
+}
+
+/*-- vec_addc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vaccq(__a, __b);
+}
+
+/*-- vec_adde_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c) {
+  return __builtin_s390_vacq(__a, __b, __c);
+}
+
+/*-- vec_addec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b,
+               __vector unsigned char __c) {
+  return __builtin_s390_vacccq(__a, __b, __c);
+}
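+
+// Illustrative sketch (editor's addition, hypothetical helper name): a
+// 256-bit addition chained from the u128 primitives above. vec_addc_u128
+// captures the carry-out of the low halves, vec_adde_u128 folds it into
+// the high halves, and vec_addec_u128 would extend the chain further.
+static inline __ATTRS_ai void
+__example_add_u256(__vector unsigned char __alo, __vector unsigned char __ahi,
+                   __vector unsigned char __blo, __vector unsigned char __bhi,
+                   __vector unsigned char *__rlo,
+                   __vector unsigned char *__rhi) {
+  __vector unsigned char __carry = vec_addc_u128(__alo, __blo);
+  *__rlo = vec_add_u128(__alo, __blo);
+  *__rhi = vec_adde_u128(__ahi, __bhi, __carry);
+}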
+
+/*-- vec_avg ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_avg(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vavgb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_avg(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vavgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_avg(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vavgf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_avg(__vector signed long long __a, __vector signed long long __b) {
+  return __builtin_s390_vavgg(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_avg(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vavglb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_avg(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vavglh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_avg(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vavglf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_avg(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vavglg(__a, __b);
+}
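+
+// Illustrative sketch (editor's addition): each vec_avg overload computes
+// the rounded average (__a + __b + 1) >> 1 per lane without intermediate
+// overflow, e.g. averaging the bytes 254 and 255 yields 255.
+static inline __ATTRS_ai __vector unsigned char
+__example_avg_u8(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_avg(__a, __b);
+}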
+
+/*-- vec_checksum -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned int
+vec_checksum(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vcksm(__a, __b);
+}
+
+/*-- vec_gfmsum -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vgfmb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vgfmh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vgfmf(__a, __b);
+}
+
+/*-- vec_gfmsum_128 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_128(__vector unsigned long long __a,
+               __vector unsigned long long __b) {
+  return __builtin_s390_vgfmg(__a, __b);
+}
+
+/*-- vec_gfmsum_accum -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum_accum(__vector unsigned char __a, __vector unsigned char __b,
+                 __vector unsigned short __c) {
+  return __builtin_s390_vgfmab(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum_accum(__vector unsigned short __a, __vector unsigned short __b,
+                 __vector unsigned int __c) {
+  return __builtin_s390_vgfmah(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum_accum(__vector unsigned int __a, __vector unsigned int __b,
+                 __vector unsigned long long __c) {
+  return __builtin_s390_vgfmaf(__a, __b, __c);
+}
+
+/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_accum_128(__vector unsigned long long __a,
+                     __vector unsigned long long __b,
+                     __vector unsigned char __c) {
+  return __builtin_s390_vgfmag(__a, __b, __c);
+}
+
+/*-- vec_mladd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector signed char __b,
+          __vector signed char __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector unsigned char __a, __vector signed char __b,
+          __vector signed char __c) {
+  return (__vector signed char)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return __a * (__vector signed char)__b + (__vector signed char)__c;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mladd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector signed short __b,
+          __vector signed short __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector unsigned short __a, __vector signed short __b,
+          __vector signed short __c) {
+  return (__vector signed short)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return __a * (__vector signed short)__b + (__vector signed short)__c;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mladd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector signed int __b,
+          __vector signed int __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector unsigned int __a, __vector signed int __b,
+          __vector signed int __c) {
+  return (__vector signed int)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return __a * (__vector signed int)__b + (__vector signed int)__c;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mladd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return __a * __b + __c;
+}
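+
+// Illustrative sketch (editor's addition, hypothetical helper name):
+// vec_mladd is a truncating low-half multiply-add, so it can scale 16-bit
+// lanes by a gain and add a bias, all modulo 2^16 per lane.
+static inline __ATTRS_ai __vector unsigned short
+__example_scale_bias_u16(__vector unsigned short __x,
+                         __vector unsigned short __gain,
+                         __vector unsigned short __bias) {
+  return vec_mladd(__x, __gain, __bias);
+}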
+
+/*-- vec_mhadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mhadd(__vector signed char __a, __vector signed char __b,
+          __vector signed char __c) {
+  return __builtin_s390_vmahb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mhadd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return __builtin_s390_vmalhb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mhadd(__vector signed short __a, __vector signed short __b,
+          __vector signed short __c) {
+  return __builtin_s390_vmahh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mhadd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return __builtin_s390_vmalhh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mhadd(__vector signed int __a, __vector signed int __b,
+          __vector signed int __c) {
+  return __builtin_s390_vmahf(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mhadd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return __builtin_s390_vmalhf(__a, __b, __c);
+}
+
+/*-- vec_meadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_meadd(__vector signed char __a, __vector signed char __b,
+          __vector signed short __c) {
+  return __builtin_s390_vmaeb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_meadd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned short __c) {
+  return __builtin_s390_vmaleb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_meadd(__vector signed short __a, __vector signed short __b,
+          __vector signed int __c) {
+  return __builtin_s390_vmaeh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_meadd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned int __c) {
+  return __builtin_s390_vmaleh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_meadd(__vector signed int __a, __vector signed int __b,
+          __vector signed long long __c) {
+  return __builtin_s390_vmaef(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_meadd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned long long __c) {
+  return __builtin_s390_vmalef(__a, __b, __c);
+}
+
+/*-- vec_moadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_moadd(__vector signed char __a, __vector signed char __b,
+          __vector signed short __c) {
+  return __builtin_s390_vmaob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_moadd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned short __c) {
+  return __builtin_s390_vmalob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_moadd(__vector signed short __a, __vector signed short __b,
+          __vector signed int __c) {
+  return __builtin_s390_vmaoh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_moadd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned int __c) {
+  return __builtin_s390_vmaloh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_moadd(__vector signed int __a, __vector signed int __b,
+          __vector signed long long __c) {
+  return __builtin_s390_vmaof(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_moadd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned long long __c) {
+  return __builtin_s390_vmalof(__a, __b, __c);
+}
+
+/*-- vec_mulh ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mulh(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vmhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mulh(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vmlhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mulh(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vmhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulh(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vmlhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mulh(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vmhf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulh(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vmlhf(__a, __b);
+}
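+
+// Illustrative sketch (editor's addition, hypothetical helper name):
+// vec_mulh keeps only the high half of each widening product, so the full
+// 32 x 32 -> 64-bit per-lane product can be reassembled from vec_mulh plus
+// an ordinary low-half multiply.
+static inline __ATTRS_ai void
+__example_mul_full_u32(__vector unsigned int __a, __vector unsigned int __b,
+                       __vector unsigned int *__hi,
+                       __vector unsigned int *__lo) {
+  *__hi = vec_mulh(__a, __b);  // upper 32 bits of each 64-bit product
+  *__lo = __a * __b;           // lower 32 bits (ordinary modular multiply)
+}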
+
+/*-- vec_mule ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mule(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vmeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mule(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vmleb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mule(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vmeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mule(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vmleh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mule(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vmef(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mule(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vmlef(__a, __b);
+}
+
+/*-- vec_mulo ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mulo(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vmob(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulo(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vmlob(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mulo(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vmoh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulo(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vmloh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mulo(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vmof(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mulo(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vmlof(__a, __b);
+}
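+
+// Illustrative sketch (editor's addition, hypothetical helper name):
+// vec_mule/vec_mulo return the widened products of the even- and
+// odd-numbered lanes, so one multiply-accumulate step of a 16 x 16 ->
+// 32-bit dot product can be written as:
+static inline __ATTRS_ai __vector signed int
+__example_dot_step_s16(__vector signed short __a, __vector signed short __b,
+                       __vector signed int __acc) {
+  // Each 32-bit lane gains the products of one even/odd pair of 16-bit
+  // lanes; overflow wraps in this sketch.
+  return __acc + vec_mule(__a, __b) + vec_mulo(__a, __b);
+}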
+
+/*-- vec_msum_u128 ----------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+#define vec_msum_u128(X, Y, Z, W) \
+  ((__vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)))
+#endif
+
+/*-- vec_sub_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsq(__a, __b);
+}
+
+/*-- vec_subc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_subc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vscbib(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_subc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vscbih(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_subc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vscbif(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vscbig(__a, __b);
+}
+
+/*-- vec_subc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vscbiq(__a, __b);
+}
+
+/*-- vec_sube_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c) {
+  return __builtin_s390_vsbiq(__a, __b, __c);
+}
+
+/*-- vec_subec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b,
+               __vector unsigned char __c) {
+  return __builtin_s390_vsbcbiq(__a, __b, __c);
+}
+
+/*-- vec_sum2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vsumgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vsumgf(__a, __b);
+}
+
+/*-- vec_sum_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vsumqf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vsumqg(__a, __b);
+}
+
+/*-- vec_sum4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsumb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vsumh(__a, __b);
+}
+
+/*-- vec_test_mask ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed char __a, __vector unsigned char __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vtm(__a, __b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed short __a, __vector unsigned short __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed int __a, __vector unsigned int __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned long long __a,
+              __vector unsigned long long __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector float __a, __vector unsigned int __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector double __a, __vector unsigned long long __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
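+
+// Illustrative sketch (editor's addition): vec_test_mask returns the
+// VECTOR TEST UNDER MASK condition code, which summarizes how many of the
+// bits of __a selected by __b are set (0 when none are).
+static inline __ATTRS_ai int
+__example_any_selected_bit_set(__vector unsigned char __a,
+                               __vector unsigned char __mask) {
+  // A nonzero condition code means at least one selected bit was set.
+  return vec_test_mask(__a, __mask) != 0;
+}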
+
+/*-- vec_madd ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_madd(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfmasb(__a, __b, __c);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_madd(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfmadb(__a, __b, __c);
+}
+
+/*-- vec_msub ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_msub(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfmssb(__a, __b, __c);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_msub(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfmsdb(__a, __b, __c);
+}
+
+/*-- vec_nmadd --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nmadd(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfnmasb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_nmadd(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfnmadb(__a, __b, __c);
+}
+#endif
+
+/*-- vec_nmsub --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nmsub(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfnmssb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_nmsub(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfnmsdb(__a, __b, __c);
+}
+#endif
+
+/*-- vec_sqrt ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_sqrt(__vector float __a) {
+  return __builtin_s390_vfsqsb(__a);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_sqrt(__vector double __a) {
+  return __builtin_s390_vfsqdb(__a);
+}
+
+/*-- vec_ld2f ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_ai __vector double
+vec_ld2f(const float *__ptr) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  return __builtin_convertvector(*(const __v2f32 *)__ptr, __vector double);
+}
+
+/*-- vec_st2f ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_ai void
+vec_st2f(__vector double __a, float *__ptr) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
+}
+
+/*-- vec_ctd ----------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector signed long long __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __vector double __conv = __builtin_convertvector(__a, __vector double);
+  __conv *= ((__vector double)(__vector unsigned long long)
+             ((0x3ffULL - __b) << 52));
+  return __conv;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector unsigned long long __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __vector double __conv = __builtin_convertvector(__a, __vector double);
+  __conv *= ((__vector double)(__vector unsigned long long)
+             ((0x3ffULL - __b) << 52));
+  return __conv;
+}
+
+/*-- vec_ctsl ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_ctsl(__vector double __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __a *= ((__vector double)(__vector unsigned long long)
+          ((0x3ffULL + __b) << 52));
+  return __builtin_convertvector(__a, __vector signed long long);
+}
+
+/*-- vec_ctul ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_ctul(__vector double __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __a *= ((__vector double)(__vector unsigned long long)
+          ((0x3ffULL + __b) << 52));
+  return __builtin_convertvector(__a, __vector unsigned long long);
+}
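+
+// Editor's note (not in the upstream header): the multiplications in
+// vec_ctd/vec_ctsl/vec_ctul above scale by a power of two whose IEEE-754
+// bit pattern is built directly -- shifting the biased exponent
+// (0x3ff -/+ __b) into bits 62:52 of a double with a zero fraction yields
+// exactly 2^-__b resp. 2^__b. A scalar sketch of the same trick:
+static inline __ATTRS_ai double
+__example_pow2(int __e) {
+  // Valid for -1022 <= __e <= 1023, where the result is a normal double.
+  union { unsigned long long __bits; double __value; } __u;
+  __u.__bits = (unsigned long long)(0x3ff + __e) << 52;
+  return __u.__value;
+}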
+
+/*-- vec_doublee ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector double
+vec_doublee(__vector float __a) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  __v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2);
+  return __builtin_convertvector(__pack, __vector double);
+}
+#endif
+
+/*-- vec_floate -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector float
+vec_floate(__vector double __a) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  __v2f32 __pack = __builtin_convertvector(__a, __v2f32);
+  return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1);
+}
+#endif
+
+/*-- vec_double -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector signed long long __a) {
+  return __builtin_convertvector(__a, __vector double);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector unsigned long long __a) {
+  return __builtin_convertvector(__a, __vector double);
+}
+
+/*-- vec_float --------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector signed int __a) {
+  return __builtin_convertvector(__a, __vector float);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector unsigned int __a) {
+  return __builtin_convertvector(__a, __vector float);
+}
+
+#endif
+
+/*-- vec_signed -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_signed(__vector double __a) {
+  return __builtin_convertvector(__a, __vector signed long long);
+}
+
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai __vector signed int
+vec_signed(__vector float __a) {
+  return __builtin_convertvector(__a, __vector signed int);
+}
+#endif
+
+/*-- vec_unsigned -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unsigned(__vector double __a) {
+  return __builtin_convertvector(__a, __vector unsigned long long);
+}
+
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unsigned(__vector float __a) {
+  return __builtin_convertvector(__a, __vector unsigned int);
+}
+#endif
+
+/*-- vec_roundp -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundp(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 6);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundp(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_ceil ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_ceil(__vector float __a) {
+  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 4, 6);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_ceil(__vector double __a) {
+  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_roundm -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundm(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 7);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundm(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_floor --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_floor(__vector float __a) {
+  // On this platform, vec_floor never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 4, 7);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_floor(__vector double __a) {
+  // On this platform, vec_floor never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_roundz -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundz(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 5);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundz(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_trunc --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_trunc(__vector float __a) {
+  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 4, 5);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_trunc(__vector double __a) {
+  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_roundc -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundc(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundc(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 0);
+}
+
+/*-- vec_rint ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_rint(__vector float __a) {
+  // vec_rint may trigger the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 0, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_rint(__vector double __a) {
+  // vec_rint may trigger the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 0, 0);
+}
+
+/*-- vec_round --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_round(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 4);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_round(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 4);
+}
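+
+// Editor's note (not in the upstream header): in the
+// __builtin_s390_vfisb/vfidb calls above, the second immediate 4 suppresses
+// the IEEE-inexact exception (0 leaves it enabled, as in vec_rint) and the
+// third selects the rounding mode: 0 = current mode, 4 = to nearest with
+// ties to even, 5 = toward zero, 6 = toward +infinity, 7 = toward
+// -infinity. For example, vec_round is the ties-to-even case:
+static inline __ATTRS_ai __vector double
+__example_round_half_even(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 4);  // same call as vec_round above
+}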
+
+/*-- vec_fp_test_data_class -------------------------------------------------*/
+
+#if __ARCH__ >= 12
+extern __ATTRS_o __vector __bool int
+vec_fp_test_data_class(__vector float __a, int __b, int *__c)
+  __constant_range(__b, 0, 4095);
+
+extern __ATTRS_o __vector __bool long long
+vec_fp_test_data_class(__vector double __a, int __b, int *__c)
+  __constant_range(__b, 0, 4095);
+
+#define vec_fp_test_data_class(X, Y, Z) \
+  ((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \
+   __extension__ ({ \
+     __vector unsigned char __res; \
+     __vector unsigned char __x = (__vector unsigned char)(X); \
+     int *__z = (Z); \
+     switch (sizeof ((X)[0])) { \
+     case 4:  __res = (__vector unsigned char) \
+                      __builtin_s390_vftcisb((__vector float)__x, (Y), __z); \
+              break; \
+     default: __res = (__vector unsigned char) \
+                      __builtin_s390_vftcidb((__vector double)__x, (Y), __z); \
+              break; \
+     } __res; }))
+#else
+#define vec_fp_test_data_class(X, Y, Z) \
+  ((__vector __bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+#endif
+
+#define __VEC_CLASS_FP_ZERO_P (1 << 11)
+#define __VEC_CLASS_FP_ZERO_N (1 << 10)
+#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | __VEC_CLASS_FP_ZERO_N)
+#define __VEC_CLASS_FP_NORMAL_P (1 << 9)
+#define __VEC_CLASS_FP_NORMAL_N (1 << 8)
+#define __VEC_CLASS_FP_NORMAL (__VEC_CLASS_FP_NORMAL_P | \
+                               __VEC_CLASS_FP_NORMAL_N)
+#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 7)
+#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 6)
+#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
+                                  __VEC_CLASS_FP_SUBNORMAL_N)
+#define __VEC_CLASS_FP_INFINITY_P (1 << 5)
+#define __VEC_CLASS_FP_INFINITY_N (1 << 4)
+#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \
+                                 __VEC_CLASS_FP_INFINITY_N)
+#define __VEC_CLASS_FP_QNAN_P (1 << 3)
+#define __VEC_CLASS_FP_QNAN_N (1 << 2)
+#define __VEC_CLASS_FP_QNAN (__VEC_CLASS_FP_QNAN_P | __VEC_CLASS_FP_QNAN_N)
+#define __VEC_CLASS_FP_SNAN_P (1 << 1)
+#define __VEC_CLASS_FP_SNAN_N (1 << 0)
+#define __VEC_CLASS_FP_SNAN (__VEC_CLASS_FP_SNAN_P | __VEC_CLASS_FP_SNAN_N)
+#define __VEC_CLASS_FP_NAN (__VEC_CLASS_FP_QNAN | __VEC_CLASS_FP_SNAN)
+#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \
+                                   __VEC_CLASS_FP_SUBNORMAL | \
+                                   __VEC_CLASS_FP_ZERO | \
+                                   __VEC_CLASS_FP_INFINITY)
+
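+// Illustrative sketch (editor's addition, hypothetical helper name):
+// vec_fp_test_data_class sets a result lane to all ones when the input
+// lane belongs to one of the classes selected by the mask, and stores the
+// instruction's condition code through the third argument.
+static inline __ATTRS_ai __vector __bool long long
+__example_is_nan(__vector double __a) {
+  int __cc;  // receives the VFTCI condition code
+  return vec_fp_test_data_class(__a, __VEC_CLASS_FP_NAN, &__cc);
+}
+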
+/*-- vec_cp_until_zero ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero(__vector signed char __a) {
+  return ((__vector signed char)
+          __builtin_s390_vistrb((__vector unsigned char)__a));
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero(__vector __bool char __a) {
+  return ((__vector __bool char)
+          __builtin_s390_vistrb((__vector unsigned char)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero(__vector unsigned char __a) {
+  return __builtin_s390_vistrb(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero(__vector signed short __a) {
+  return ((__vector signed short)
+          __builtin_s390_vistrh((__vector unsigned short)__a));
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero(__vector __bool short __a) {
+  return ((__vector __bool short)
+          __builtin_s390_vistrh((__vector unsigned short)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero(__vector unsigned short __a) {
+  return __builtin_s390_vistrh(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero(__vector signed int __a) {
+  return ((__vector signed int)
+          __builtin_s390_vistrf((__vector unsigned int)__a));
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero(__vector __bool int __a) {
+  return ((__vector __bool int)
+          __builtin_s390_vistrf((__vector unsigned int)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero(__vector unsigned int __a) {
+  return __builtin_s390_vistrf(__a);
+}
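+
+// Illustrative sketch (editor's addition): vec_cp_until_zero copies lanes
+// up to and including the first zero lane and clears every lane after it,
+// which is handy for masking the tail of a NUL-terminated string chunk.
+static inline __ATTRS_ai __vector unsigned char
+__example_mask_after_nul(__vector unsigned char __chunk) {
+  // E.g. {'h','i',0,'x',...} becomes {'h','i',0,0,...}.
+  return vec_cp_until_zero(__chunk);
+}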
+
+/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero_cc(__vector signed char __a, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero_cc(__vector __bool char __a, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero_cc(__vector unsigned char __a, int *__cc) {
+  return __builtin_s390_vistrbs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero_cc(__vector signed short __a, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero_cc(__vector __bool short __a, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero_cc(__vector unsigned short __a, int *__cc) {
+  return __builtin_s390_vistrhs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero_cc(__vector signed int __a, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero_cc(__vector __bool int __a, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero_cc(__vector unsigned int __a, int *__cc) {
+  return __builtin_s390_vistrfs(__a, __cc);
+}
+
+/*-- vec_cmpeq_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfeeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfeeb((__vector unsigned char)__a,
+                              (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfeeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfeeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfeeh((__vector unsigned short)__a,
+                              (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfeeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfeef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfeef((__vector unsigned int)__a,
+                              (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfeef(__a, __b);
+}
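+
+// Illustrative sketch (editor's addition): vec_cmpeq_idx performs VECTOR
+// FIND ELEMENT EQUAL; byte 7 of the result holds the byte index of the
+// leftmost lane where __a and __b compare equal, or 16 if no lane does.
+static inline __ATTRS_ai int
+__example_first_equal_byte(__vector unsigned char __a,
+                           __vector unsigned char __b) {
+  return vec_cmpeq_idx(__a, __b)[7];  // 0..15, or 16 when nothing matched
+}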
+
+/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfeebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfeebs((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                 int *__cc) {
+  return __builtin_s390_vfeebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx_cc(__vector signed short __a, __vector signed short __b,
+                 int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfeehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector __bool short __a, __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfeehs((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                 int *__cc) {
+  return __builtin_s390_vfeehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfeefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfeefs((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                 int *__cc) {
+  return __builtin_s390_vfeefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfeezb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfeezb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfeezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfeezh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfeezh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfeezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfeezf((__vector unsigned int)__a,
+                          (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfeezf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfeezf(__a, __b);
+}
+
+/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
+                      int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfeezbs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezbs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
+                      int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfeezhs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezhs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+                      int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfeezfs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezfs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfeneb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfeneb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfeneb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfeneh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfeneh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfeneh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfenef((__vector unsigned int)__a,
+                          (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfenef((__vector unsigned int)__a,
+                               (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfenef(__a, __b);
+}
+
+/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx_cc(__vector signed char __a, __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfenebs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector __bool char __a, __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfenebs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                 int *__cc) {
+  return __builtin_s390_vfenebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx_cc(__vector signed short __a, __vector signed short __b,
+                 int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfenehs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                 int *__cc) {
+  return __builtin_s390_vfenehs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                 int *__cc) {
+  return __builtin_s390_vfenehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfenefs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfenefs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                 int *__cc) {
+  return __builtin_s390_vfenefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfenezb((__vector unsigned char)__a,
+                           (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfenezb((__vector unsigned char)__a,
+                                (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfenezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfenezh((__vector unsigned short)__a,
+                           (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfenezh((__vector unsigned short)__a,
+                                (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfenezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfenezf((__vector unsigned int)__a,
+                           (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfenezf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfenezf(__a, __b);
+}
+
+/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
+                      int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfenezbs((__vector unsigned char)__a,
+                            (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezbs((__vector unsigned char)__a,
+                                 (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
+                      int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfenezhs((__vector unsigned short)__a,
+                            (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezhs((__vector unsigned short)__a,
+                                 (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+                      int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfenezfs((__vector unsigned int)__a,
+                            (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezfs((__vector unsigned int)__a,
+                                 (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmprg --------------------------------------------------------------*/
+
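+// Note: these wrap VECTOR STRING RANGE COMPARE. __b supplies the range
+// boundary values and __c the per-boundary comparison controls; the final
+// immediate picks the result form (a pattern visible below: 4 = boolean
+// mask, 0 = element index, and +8 inverts the range test for the
+// vec_cmpnrg* family).
+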
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+}
+
+/*-- vec_cmprg_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg_cc(__vector unsigned char __a, __vector unsigned char __b,
+             __vector unsigned char __c, int *__cc) {
+  return (__vector __bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg_cc(__vector unsigned short __a, __vector unsigned short __b,
+             __vector unsigned short __c, int *__cc) {
+  return (__vector __bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg_cc(__vector unsigned int __a, __vector unsigned int __b,
+             __vector unsigned int __c, int *__cc) {
+  return (__vector __bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+}
+
+/*-- vec_cmprg_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c) {
+  return __builtin_s390_vstrcb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx(__vector unsigned short __a, __vector unsigned short __b,
+              __vector unsigned short __c) {
+  return __builtin_s390_vstrch(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx(__vector unsigned int __a, __vector unsigned int __b,
+              __vector unsigned int __c) {
+  return __builtin_s390_vstrcf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                 __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                 __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                 __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+                   __vector unsigned char __c) {
+  return __builtin_s390_vstrczb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+                   __vector unsigned short __c) {
+  return __builtin_s390_vstrczh(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+                   __vector unsigned int __c) {
+  return __builtin_s390_vstrczf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                      __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                      __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                      __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmpnrg -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg(__vector unsigned char __a, __vector unsigned char __b,
+           __vector unsigned char __c) {
+  return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg(__vector unsigned short __a, __vector unsigned short __b,
+           __vector unsigned short __c) {
+  return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg(__vector unsigned int __a, __vector unsigned int __b,
+           __vector unsigned int __c) {
+  return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+}
+
+/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg_cc(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg_cc(__vector unsigned short __a, __vector unsigned short __b,
+              __vector unsigned short __c, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg_cc(__vector unsigned int __a, __vector unsigned int __b,
+              __vector unsigned int __c, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+}
+
+/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx(__vector unsigned char __a, __vector unsigned char __b,
+               __vector unsigned char __c) {
+  return __builtin_s390_vstrcb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx(__vector unsigned short __a, __vector unsigned short __b,
+               __vector unsigned short __c) {
+  return __builtin_s390_vstrch(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx(__vector unsigned int __a, __vector unsigned int __b,
+               __vector unsigned int __c) {
+  return __builtin_s390_vstrcf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                  __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                  __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                  __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+                    __vector unsigned char __c) {
+  return __builtin_s390_vstrczb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+                    __vector unsigned short __c) {
+  return __builtin_s390_vstrczh(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+                    __vector unsigned int __c) {
+  return __builtin_s390_vstrczf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx_cc(__vector unsigned char __a,
+                       __vector unsigned char __b,
+                       __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx_cc(__vector unsigned short __a,
+                       __vector unsigned short __b,
+                       __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx_cc(__vector unsigned int __a,
+                       __vector unsigned int __b,
+                       __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_find_any_eq --------------------------------------------------------*/
+
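+// Note: vec_find_any_eq wraps VECTOR FIND ANY ELEMENT EQUAL: each element
+// of __a is tested for equality against every element of __b. As with the
+// range compares, the immediate selects the result form (4 = boolean mask,
+// 0 = index; 12 and 8 are the inverted forms used by vec_find_any_ne*).
+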
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 4);
+}
+
+/*-- vec_find_any_eq_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector signed char __a, __vector signed char __b,
+                   int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector __bool char __a, __vector __bool char __b,
+                   int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector unsigned char __a, __vector unsigned char __b,
+                   int *__cc) {
+  return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector signed short __a, __vector signed short __b,
+                   int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector __bool short __a, __vector __bool short __b,
+                   int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector unsigned short __a, __vector unsigned short __b,
+                   int *__cc) {
+  return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector signed int __a, __vector signed int __b,
+                   int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector __bool int __a, __vector __bool int __b,
+                   int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector unsigned int __a, __vector unsigned int __b,
+                   int *__cc) {
+  return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+}
+
+/*-- vec_find_any_eq_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfaeb((__vector unsigned char)__a,
+                              (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfaeb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfaeh((__vector unsigned short)__a,
+                              (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfaeh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfaef((__vector unsigned int)__a,
+                              (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfaef(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx_cc(__vector signed char __a,
+                       __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector __bool char __a,
+                       __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaebs((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector unsigned char __a,
+                       __vector unsigned char __b, int *__cc) {
+  return __builtin_s390_vfaebs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx_cc(__vector signed short __a,
+                       __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector __bool short __a,
+                       __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaehs((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector unsigned short __a,
+                       __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaehs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx_cc(__vector signed int __a,
+                       __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector __bool int __a,
+                       __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaefs((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector unsigned int __a,
+                       __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaefs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx(__vector signed char __a,
+                         __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaezb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector __bool char __a,
+                         __vector __bool char __b) {
+  return __builtin_s390_vfaezb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector unsigned char __a,
+                         __vector unsigned char __b) {
+  return __builtin_s390_vfaezb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx(__vector signed short __a,
+                         __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaezh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector __bool short __a,
+                         __vector __bool short __b) {
+  return __builtin_s390_vfaezh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector unsigned short __a,
+                         __vector unsigned short __b) {
+  return __builtin_s390_vfaezh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx(__vector signed int __a,
+                         __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaezf((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector __bool int __a,
+                         __vector __bool int __b) {
+  return __builtin_s390_vfaezf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector unsigned int __a,
+                         __vector unsigned int __b) {
+  return __builtin_s390_vfaezf(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx_cc(__vector signed char __a,
+                            __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector __bool char __a,
+                            __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector unsigned char __a,
+                            __vector unsigned char __b, int *__cc) {
+  return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx_cc(__vector signed short __a,
+                            __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector __bool short __a,
+                            __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector unsigned short __a,
+                            __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx_cc(__vector signed int __a,
+                            __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector __bool int __a,
+                            __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector unsigned int __a,
+                            __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_ne --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 12);
+}
+
+/*-- vec_find_any_ne_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector signed char __a,
+                   __vector signed char __b, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector __bool char __a,
+                   __vector __bool char __b, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector unsigned char __a,
+                   __vector unsigned char __b, int *__cc) {
+  return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector signed short __a,
+                   __vector signed short __b, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector __bool short __a,
+                   __vector __bool short __b, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector unsigned short __a,
+                   __vector unsigned short __b, int *__cc) {
+  return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector signed int __a,
+                   __vector signed int __b, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector __bool int __a,
+                   __vector __bool int __b, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector unsigned int __a,
+                   __vector unsigned int __b, int *__cc) {
+  return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+}
+
+/*-- vec_find_any_ne_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfaeb((__vector unsigned char)__a,
+                              (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfaeb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfaeh((__vector unsigned short)__a,
+                              (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfaeh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfaef((__vector unsigned int)__a,
+                              (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfaef(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx_cc(__vector signed char __a,
+                       __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector __bool char __a,
+                       __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaebs((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector unsigned char __a,
+                       __vector unsigned char __b,
+                       int *__cc) {
+  return __builtin_s390_vfaebs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx_cc(__vector signed short __a,
+                       __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector __bool short __a,
+                       __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaehs((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector unsigned short __a,
+                       __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaehs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx_cc(__vector signed int __a,
+                       __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector __bool int __a,
+                       __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaefs((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector unsigned int __a,
+                       __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaefs(__a, __b, 8, __cc);
+}
+
+/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx(__vector signed char __a,
+                         __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaezb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector __bool char __a,
+                         __vector __bool char __b) {
+  return __builtin_s390_vfaezb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector unsigned char __a,
+                         __vector unsigned char __b) {
+  return __builtin_s390_vfaezb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx(__vector signed short __a,
+                         __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaezh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector __bool short __a,
+                         __vector __bool short __b) {
+  return __builtin_s390_vfaezh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector unsigned short __a,
+                         __vector unsigned short __b) {
+  return __builtin_s390_vfaezh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx(__vector signed int __a,
+                         __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaezf((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector __bool int __a,
+                         __vector __bool int __b) {
+  return __builtin_s390_vfaezf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector unsigned int __a,
+                         __vector unsigned int __b) {
+  return __builtin_s390_vfaezf(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx_cc(__vector signed char __a,
+                            __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector __bool char __a,
+                            __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector unsigned char __a,
+                            __vector unsigned char __b, int *__cc) {
+  return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx_cc(__vector signed short __a,
+                            __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector __bool short __a,
+                            __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector unsigned short __a,
+                            __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx_cc(__vector signed int __a,
+                            __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector __bool int __a,
+                            __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector unsigned int __a,
+                            __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
+}
+
+/*-- vec_search_string_cc ---------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
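+// Note: VECTOR STRING SEARCH is an arch13 (z15) addition, hence the guard.
+// Roughly: the result holds the byte index of the first occurrence of the
+// substring __b (whose length is taken from __c) within __a, and *__cc
+// distinguishes full, partial, and no match.
+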
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed char __a, __vector signed char __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool char __a, __vector __bool char __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned char __a, __vector unsigned char __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed short __a, __vector signed short __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool short __a, __vector __bool short __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned short __a, __vector unsigned short __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed int __a, __vector signed int __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool int __a, __vector __bool int __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned int __a, __vector unsigned int __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsf(__a, __b, __c, __cc);
+}
+
+#endif
+
+/*-- vec_search_string_until_zero_cc ----------------------------------------*/
+
+#if __ARCH__ >= 13
+
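+// Note: same as vec_search_string_cc, except (roughly) the search also
+// stops at the first zero element in the haystack, for use with
+// NUL-terminated strings (the zero-search form of VECTOR STRING SEARCH).
+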
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed char __a,
+                                __vector signed char __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszb((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool char __a,
+                                __vector __bool char __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszb((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned char __a,
+                                __vector unsigned char __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed short __a,
+                                __vector signed short __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszh((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool short __a,
+                                __vector __bool short __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszh((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned short __a,
+                                __vector unsigned short __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed int __a,
+                                __vector signed int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool int __a,
+                                __vector __bool int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned int __a,
+                                __vector unsigned int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf(__a, __b, __c, __cc);
+}
+
+#endif
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/vpclmulqdqintrin.h b/darwin-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/vpclmulqdqintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/waitpkgintrin.h b/darwin-x86/lib64/clang/11.0.5/include/waitpkgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/waitpkgintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h b/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h
new file mode 100644
index 0000000..b781238
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h
@@ -0,0 +1,1133 @@
+/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __WASM_SIMD128_H
+#define __WASM_SIMD128_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+// User-facing type
+typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
+
+// Internal types determined by clang builtin definitions
+typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef signed char __s8x16
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned char __u8x16
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned short __u16x8
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned int __u32x4
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned long long __u64x2
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
+                 __min_vector_width__(128)))
+
+#define __REQUIRE_CONSTANT(e)                                                  \
+  _Static_assert(__builtin_constant_p(e), "Expected constant")
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
+  // UB-free unaligned access copied from xmmintrin.h
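+  // The packed struct has alignment 1, so the load never assumes __mem is
+  // 16-byte aligned, and __may_alias__ keeps the access legal under strict
+  // aliasing whatever __mem actually points to.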
+  struct __wasm_v128_load_struct {
+    __v128_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  return ((const struct __wasm_v128_load_struct *)__mem)->__v;
+}
+
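+// The *_load_splat loaders read a single scalar (again through a packed,
+// may_alias struct, so unaligned pointers are fine) and broadcast it to
+// every lane of the result.
+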
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v8x16_load_splat(const void *__mem) {
+  struct __wasm_v8x16_load_splat_struct {
+    uint8_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
+                           __v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v16x8_load_splat(const void *__mem) {
+  struct __wasm_v16x8_load_splat_struct {
+    uint16_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v32x4_load_splat(const void *__mem) {
+  struct __wasm_v32x4_load_splat_struct {
+    uint32_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u32x4){__v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v64x2_load_splat(const void *__mem) {
+  struct __wasm_v64x2_load_splat_struct {
+    uint64_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u64x2){__v, __v};
+}
+
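+// The widening loads below read 64 bits and extend each lane to twice its
+// width; __builtin_convertvector sign- or zero-extends according to the
+// signedness of the source lane type (i* vs. u*).
+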
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_load_8x8(const void *__mem) {
+  typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_i16x8_load_8x8_struct {
+    __i8x8 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __i16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_load_8x8(const void *__mem) {
+  typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_u16x8_load_8x8_struct {
+    __u8x8 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __u16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_load_16x4(const void *__mem) {
+  typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_i32x4_load_16x4_struct {
+    __i16x4 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __i32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_load_16x4(const void *__mem) {
+  typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_u32x4_load_16x4_struct {
+    __u16x4 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __u32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_load_32x2(const void *__mem) {
+  typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_i64x2_load_32x2_struct {
+    __i32x2 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_load_32x2(const void *__mem) {
+  typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_u64x2_load_32x2_struct {
+    __u32x2 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __u64x2);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
+                                                          v128_t __a) {
+  // UB-free unaligned access copied from xmmintrin.h
+  struct __wasm_v128_store_struct {
+    __v128_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
+                int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
+                int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
+                int8_t __c14, int8_t __c15) {
+  return (v128_t)(__i8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
+                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
+                           __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
+                int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
+  return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
+                                                            int32_t __c1,
+                                                            int32_t __c2,
+                                                            int32_t __c3) {
+  return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
+                                                            float __c1,
+                                                            float __c2,
+                                                            float __c3) {
+  return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
+                                                            int64_t __c1) {
+  return (v128_t)(__i64x2){__c0, __c1};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
+                                                            double __c1) {
+  return (v128_t)(__f64x2){__c0, __c1};
+}
+
+#define wasm_i8x16_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, __c8, \
+                         __c9, __c10, __c11, __c12, __c13, __c14, __c15)       \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    __REQUIRE_CONSTANT(__c4);                                                  \
+    __REQUIRE_CONSTANT(__c5);                                                  \
+    __REQUIRE_CONSTANT(__c6);                                                  \
+    __REQUIRE_CONSTANT(__c7);                                                  \
+    __REQUIRE_CONSTANT(__c8);                                                  \
+    __REQUIRE_CONSTANT(__c9);                                                  \
+    __REQUIRE_CONSTANT(__c10);                                                 \
+    __REQUIRE_CONSTANT(__c11);                                                 \
+    __REQUIRE_CONSTANT(__c12);                                                 \
+    __REQUIRE_CONSTANT(__c13);                                                 \
+    __REQUIRE_CONSTANT(__c14);                                                 \
+    __REQUIRE_CONSTANT(__c15);                                                 \
+    (v128_t)(__i8x16){__c0, __c1, __c2,  __c3,  __c4,  __c5,  __c6,  __c7,     \
+                      __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15};   \
+  })
+
+#define wasm_i16x8_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)       \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    __REQUIRE_CONSTANT(__c4);                                                  \
+    __REQUIRE_CONSTANT(__c5);                                                  \
+    __REQUIRE_CONSTANT(__c6);                                                  \
+    __REQUIRE_CONSTANT(__c7);                                                  \
+    (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};         \
+  })
+
+#define wasm_i32x4_const(__c0, __c1, __c2, __c3)                               \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    (v128_t)(__i32x4){__c0, __c1, __c2, __c3};                                 \
+  })
+
+#define wasm_f32x4_const(__c0, __c1, __c2, __c3)                               \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    (v128_t)(__f32x4){__c0, __c1, __c2, __c3};                                 \
+  })
+
+#define wasm_i64x2_const(__c0, __c1)                                           \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    (v128_t)(__i64x2){__c0, __c1};                                             \
+  })
+
+#define wasm_f64x2_const(__c0, __c1)                                           \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    (v128_t)(__f64x2){__c0, __c1};                                             \
+  })
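+// Illustrative usage sketch (editorial addition, not part of the
+// upstream header): the const macros use statement expressions so that
+// __REQUIRE_CONSTANT can reject, at compile time, arguments that are
+// not constant expressions:
+//
+//   v128_t k = wasm_i32x4_const(0, 1, 2, 3);      // OK: literals
+//   // wasm_i32x4_const(x, 1, 2, 3) fails to compile unless x is a
+//   // constant expression; use wasm_i32x4_make for runtime values.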
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
+  return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
+                           __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i8x16_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
+
+#define wasm_u8x16_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+
+#define wasm_i8x16_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
+  return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i16x8_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
+
+#define wasm_u16x8_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+
+#define wasm_i16x8_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
+  return (v128_t)(__i32x4){__a, __a, __a, __a};
+}
+
+#define wasm_i32x4_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_i32x4((__i32x4)(__a), __i))
+
+#define wasm_i32x4_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i32x4((__i32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
+  return (v128_t)(__i64x2){__a, __a};
+}
+
+#define wasm_i64x2_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_i64x2((__i64x2)(__a), __i))
+
+#define wasm_i64x2_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i64x2((__i64x2)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
+  return (v128_t)(__f32x4){__a, __a, __a, __a};
+}
+
+#define wasm_f32x4_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_f32x4((__f32x4)(__a), __i))
+
+#define wasm_f32x4_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_f32x4((__f32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
+  return (v128_t)(__f64x2){__a, __a};
+}
+
+#define wasm_f64x2_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_f64x2((__f64x2)(__a), __i))
+
+#define wasm_f64x2_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_f64x2((__f64x2)(__a), __i, __b))
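+// Illustrative usage sketch (editorial addition, not part of the
+// upstream header): lane indices for the extract/replace macros must be
+// constant expressions within the lane count:
+//
+//   v128_t v = wasm_i32x4_make(10, 20, 30, 40);
+//   int32_t x = wasm_i32x4_extract_lane(v, 2);     // x == 30
+//   v128_t w = wasm_i32x4_replace_lane(v, 0, -1);  // {-1, 20, 30, 40}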
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a == (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a != (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a < (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a < (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a > (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a > (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a <= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a >= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a == (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a != (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a < (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a < (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a > (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a > (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a <= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a <= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a >= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a >= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a == (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a != (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a < (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a < (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a > (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a > (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a <= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a <= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a >= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a >= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a == (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a != (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a < (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a > (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a <= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a >= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a == (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a != (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a < (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a > (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a <= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a >= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {
+  return ~__a;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,
+                                                          v128_t __b) {
+  return __a & __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,
+                                                         v128_t __b) {
+  return __a | __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,
+                                                          v128_t __b) {
+  return __a ^ __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
+                                                             v128_t __b) {
+  return __a & ~__b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
+                                                                v128_t __b,
+                                                                v128_t __mask) {
+  return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,
+                                          (__i32x4)__mask);
+}
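+// Illustrative usage sketch (editorial addition, not part of the
+// upstream header): the comparisons above produce all-ones lanes for
+// true and all-zeros lanes for false, which composes with bitselect
+// into a lane-wise select; with hypothetical vectors __x and __y:
+//
+//   v128_t mask = wasm_i32x4_gt(__x, __y);
+//   v128_t max  = wasm_v128_bitselect(__x, __y, mask);  // lane-wise max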
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
+  return (v128_t)(-(__u8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i8x16)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__s8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u8x16)__a >> __b);
+}
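+// Illustrative note (editorial addition, not part of the upstream
+// header): shifts take one scalar count applied to every lane; the
+// underlying Wasm shift instructions interpret the count modulo the
+// lane width (modulo 8 for these 8-bit forms).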
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u8x16)__a + (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u8x16)__a - (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
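+// Illustrative note (editorial addition, not part of the upstream
+// header): the plain add/sub above work on unsigned lanes so wraparound
+// is well defined in C, while the saturating forms clamp to the lane
+// type's range, e.g. for 8-bit lanes:
+//
+//   wasm_i8x16_add_saturate(wasm_i8x16_splat(100), wasm_i8x16_splat(100))
+//       // -> every lane is 127 (clamped to [-128, 127])
+//   wasm_u8x16_sub_saturate(wasm_i8x16_splat(0), wasm_i8x16_splat(1))
+//       // -> every lane is 0 (clamped to [0, 255])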
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
+  return (v128_t)(-(__u16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i16x8)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u16x8)__a + (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__i16x8)__a - (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u16x8)__a * (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
+  return (v128_t)(-(__u32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i32x4)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u32x4)__a + (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u32x4)__a - (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u32x4)__a * (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
+  return (v128_t)(-(__u64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i64x2((__i64x2)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i64x2((__i64x2)__a);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i64x2)__a << (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u64x2)__a + (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u64x2)__a - (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u64x2)__a * (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {
+  return (v128_t)(-(__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
+  return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfma(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfma_f32x4((__f32x4)__a, (__f32x4)__b,
+                                           (__f32x4)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfms(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfms_f32x4((__f32x4)__a, (__f32x4)__b,
+                                           (__f32x4)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a + (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a - (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a * (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a / (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {
+  return (v128_t)(-(__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
+  return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfma(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfma_f64x2((__f64x2)__a, (__f64x2)__b,
+                                           (__f64x2)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfms(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfms_f64x2((__f64x2)__a, (__f64x2)__b,
+                                           (__f64x2)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a + (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a - (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a * (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a / (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_i32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_u32x4(v128_t __a) {
+  return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
+}
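+// Illustrative note (editorial addition, not part of the upstream
+// header): the saturating truncations follow the Wasm SIMD rules --
+// NaN lanes become 0 and out-of-range values clamp, so
+// wasm_i32x4_trunc_saturate_f32x4 maps 3e9f to INT32_MAX and -3e9f to
+// INT32_MIN.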
+
+#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
+                           __c14, __c15)                                       \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5,      \
+      __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
+
+#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+                           __c7)                                               \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2,        \
+      (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2,  \
+      (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2,  \
+      (__c7)*2 + 1))
+
+#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2,    \
+      (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3,        \
+      (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4,            \
+      (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
+
+#define wasm_v64x2_shuffle(__a, __b, __c0, __c1)                               \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2,    \
+      (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7,    \
+      (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4,        \
+      (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
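+// Illustrative note (editorial addition, not part of the upstream
+// header): the wider shuffles expand each lane index into consecutive
+// byte indices before delegating to the v8x16 shuffle; e.g. in
+// wasm_v32x4_shuffle(__a, __b, 1, 4, 2, 6), lane 1 becomes bytes 4..7
+// and lane 4 (the first lane of __b) becomes bytes 16..19.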
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
+                                                               v128_t __b) {
+  return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
+}
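+// Illustrative note (editorial addition, not part of the upstream
+// header): unlike the shuffle macros, swizzle reads its byte selectors
+// at run time from __b; in Wasm SIMD a selector >= 16 yields 0 in that
+// lane.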
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,
+                                                     (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
+                                                     (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_i8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_i8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_u8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_u8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_i16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_i16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_u16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_u16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+}
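+// Illustrative note (editorial addition, not part of the upstream
+// header): narrow packs two source vectors into one with saturation to
+// the destination lane type, while widen_low/widen_high sign- or
+// zero-extend the low or high half of the input, e.g.
+// wasm_i16x8_widen_low_i8x16 sign-extends input bytes 0..7 to eight
+// 16-bit lanes.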
+
+// Undefine helper macros
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __WASM_SIMD128_H
diff --git a/darwin-x86/lib64/clang/11.0.1/include/wbnoinvdintrin.h b/darwin-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/wbnoinvdintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/wmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/wmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/wmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/wmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/x86intrin.h b/darwin-x86/lib64/clang/11.0.5/include/x86intrin.h
new file mode 100644
index 0000000..768d0e5
--- /dev/null
+++ b/darwin-x86/lib64/clang/11.0.5/include/x86intrin.h
@@ -0,0 +1,63 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <ia32intrin.h>
+
+#include <immintrin.h>
+
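+/* Illustrative note (editorial addition, not part of the upstream
+ * header): every guarded include below follows one pattern -- pull the
+ * sub-header in when the compiler is not MSVC or a PlayStation
+ * toolchain (__SCE__), when Clang modules are enabled, or when the
+ * matching feature-test macro (e.g. __3dNOW__) is defined. */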
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__3dNOW__)
+#include <mm3dnow.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PRFCHW__)
+#include <prfchwintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE4A__)
+#include <ammintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FMA4__)
+#include <fma4intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XOP__)
+#include <xopintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__TBM__)
+#include <tbmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__LWP__)
+#include <lwpintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MWAITX__)
+#include <mwaitxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLZERO__)
+#include <clzerointrin.h>
+#endif
+
+
+#endif /* __X86INTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xmmintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xmmintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/11.0.1/include/xmmintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xmmintrin.h
index 9b8de63..f468669 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/xmmintrin.h
+++ b/darwin-x86/lib64/clang/11.0.5/include/xmmintrin.h
@@ -2931,31 +2931,31 @@
 
 #define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
 
-#define _MM_EXCEPT_INVALID    (0x0001)
-#define _MM_EXCEPT_DENORM     (0x0002)
-#define _MM_EXCEPT_DIV_ZERO   (0x0004)
-#define _MM_EXCEPT_OVERFLOW   (0x0008)
-#define _MM_EXCEPT_UNDERFLOW  (0x0010)
-#define _MM_EXCEPT_INEXACT    (0x0020)
-#define _MM_EXCEPT_MASK       (0x003f)
+#define _MM_EXCEPT_INVALID    (0x0001U)
+#define _MM_EXCEPT_DENORM     (0x0002U)
+#define _MM_EXCEPT_DIV_ZERO   (0x0004U)
+#define _MM_EXCEPT_OVERFLOW   (0x0008U)
+#define _MM_EXCEPT_UNDERFLOW  (0x0010U)
+#define _MM_EXCEPT_INEXACT    (0x0020U)
+#define _MM_EXCEPT_MASK       (0x003fU)
 
-#define _MM_MASK_INVALID      (0x0080)
-#define _MM_MASK_DENORM       (0x0100)
-#define _MM_MASK_DIV_ZERO     (0x0200)
-#define _MM_MASK_OVERFLOW     (0x0400)
-#define _MM_MASK_UNDERFLOW    (0x0800)
-#define _MM_MASK_INEXACT      (0x1000)
-#define _MM_MASK_MASK         (0x1f80)
+#define _MM_MASK_INVALID      (0x0080U)
+#define _MM_MASK_DENORM       (0x0100U)
+#define _MM_MASK_DIV_ZERO     (0x0200U)
+#define _MM_MASK_OVERFLOW     (0x0400U)
+#define _MM_MASK_UNDERFLOW    (0x0800U)
+#define _MM_MASK_INEXACT      (0x1000U)
+#define _MM_MASK_MASK         (0x1f80U)
 
-#define _MM_ROUND_NEAREST     (0x0000)
-#define _MM_ROUND_DOWN        (0x2000)
-#define _MM_ROUND_UP          (0x4000)
-#define _MM_ROUND_TOWARD_ZERO (0x6000)
-#define _MM_ROUND_MASK        (0x6000)
+#define _MM_ROUND_NEAREST     (0x0000U)
+#define _MM_ROUND_DOWN        (0x2000U)
+#define _MM_ROUND_UP          (0x4000U)
+#define _MM_ROUND_TOWARD_ZERO (0x6000U)
+#define _MM_ROUND_MASK        (0x6000U)
 
-#define _MM_FLUSH_ZERO_MASK   (0x8000)
-#define _MM_FLUSH_ZERO_ON     (0x8000)
-#define _MM_FLUSH_ZERO_OFF    (0x0000)
+#define _MM_FLUSH_ZERO_MASK   (0x8000U)
+#define _MM_FLUSH_ZERO_ON     (0x8000U)
+#define _MM_FLUSH_ZERO_OFF    (0x0000U)
 
 #define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
 #define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xopintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xopintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/xopintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xopintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xsavecintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xsavecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/xsavecintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xsavecintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xsaveintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xsaveintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/xsaveintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xsaveintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xsaveoptintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/xsaveoptintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xsavesintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xsavesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/xsavesintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xsavesintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xtestintrin.h b/darwin-x86/lib64/clang/11.0.5/include/xtestintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/include/xtestintrin.h
rename to darwin-x86/lib64/clang/11.0.5/include/xtestintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/share/asan_blacklist.txt b/darwin-x86/lib64/clang/11.0.5/share/asan_blacklist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/share/asan_blacklist.txt
rename to darwin-x86/lib64/clang/11.0.5/share/asan_blacklist.txt
diff --git a/darwin-x86/lib64/clang/11.0.1/share/cfi_blacklist.txt b/darwin-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.1/share/cfi_blacklist.txt
rename to darwin-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt
diff --git a/darwin-x86/lib64/libLLVM.dylib b/darwin-x86/lib64/libLLVM.dylib
index 5f96617..bdf2fcc 100755
--- a/darwin-x86/lib64/libLLVM.dylib
+++ b/darwin-x86/lib64/libLLVM.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libbase.dylib b/darwin-x86/lib64/libbase.dylib
index 8f546cc..e09786c 100755
--- a/darwin-x86/lib64/libbase.dylib
+++ b/darwin-x86/lib64/libbase.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.1.dylib b/darwin-x86/lib64/libc++.1.dylib
index 19460db..ee89286 100755
--- a/darwin-x86/lib64/libc++.1.dylib
+++ b/darwin-x86/lib64/libc++.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.dylib b/darwin-x86/lib64/libc++.dylib
index ea1d2c6..7235172 100755
--- a/darwin-x86/lib64/libc++.dylib
+++ b/darwin-x86/lib64/libc++.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++abi.1.dylib b/darwin-x86/lib64/libc++abi.1.dylib
index 4f00957..cd3c0d1 100755
--- a/darwin-x86/lib64/libc++abi.1.dylib
+++ b/darwin-x86/lib64/libc++abi.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libclang_cxx.dylib b/darwin-x86/lib64/libclang_cxx.dylib
index 90cd78d..1f2795f 100755
--- a/darwin-x86/lib64/libclang_cxx.dylib
+++ b/darwin-x86/lib64/libclang_cxx.dylib
Binary files differ
diff --git a/darwin-x86/lib64/liblog.dylib b/darwin-x86/lib64/liblog.dylib
index 21d3d7b..d2aecee 100755
--- a/darwin-x86/lib64/liblog.dylib
+++ b/darwin-x86/lib64/liblog.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libprotobuf-cpp-full.dylib b/darwin-x86/lib64/libprotobuf-cpp-full.dylib
index f3af7e2..8bc37ec 100755
--- a/darwin-x86/lib64/libprotobuf-cpp-full.dylib
+++ b/darwin-x86/lib64/libprotobuf-cpp-full.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libxml2.2.9.10.dylib b/darwin-x86/lib64/libxml2.2.9.10.dylib
new file mode 100755
index 0000000..a8d1171
--- /dev/null
+++ b/darwin-x86/lib64/libxml2.2.9.10.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libz-host.dylib b/darwin-x86/lib64/libz-host.dylib
index c4e69aa..d8db66b 100755
--- a/darwin-x86/lib64/libz-host.dylib
+++ b/darwin-x86/lib64/libz-host.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libziparchive.dylib b/darwin-x86/lib64/libziparchive.dylib
index 68f1576..b8fc965 100755
--- a/darwin-x86/lib64/libziparchive.dylib
+++ b/darwin-x86/lib64/libziparchive.dylib
Binary files differ
diff --git a/linux-x86/bin/cxx_extractor b/linux-x86/bin/cxx_extractor
index fe677cd..890128c 100755
--- a/linux-x86/bin/cxx_extractor
+++ b/linux-x86/bin/cxx_extractor
Binary files differ
diff --git a/linux-x86/bin/header-abi-diff b/linux-x86/bin/header-abi-diff
index 7bd9753..3d8471a 100755
--- a/linux-x86/bin/header-abi-diff
+++ b/linux-x86/bin/header-abi-diff
Binary files differ
diff --git a/linux-x86/bin/header-abi-dumper b/linux-x86/bin/header-abi-dumper
index 594b00b..dd64c6c 100755
--- a/linux-x86/bin/header-abi-dumper
+++ b/linux-x86/bin/header-abi-dumper
Binary files differ
diff --git a/linux-x86/bin/header-abi-linker b/linux-x86/bin/header-abi-linker
index f74f9c2..618326c 100755
--- a/linux-x86/bin/header-abi-linker
+++ b/linux-x86/bin/header-abi-linker
Binary files differ
diff --git a/linux-x86/bin/proto_metadata_plugin b/linux-x86/bin/proto_metadata_plugin
index 6897662..d6470e0 100755
--- a/linux-x86/bin/proto_metadata_plugin
+++ b/linux-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/linux-x86/bin/protoc_extractor b/linux-x86/bin/protoc_extractor
index 805261c..9178e95 100755
--- a/linux-x86/bin/protoc_extractor
+++ b/linux-x86/bin/protoc_extractor
Binary files differ
diff --git a/linux-x86/bin/versioner b/linux-x86/bin/versioner
index 6944e47..06e7cf5 100755
--- a/linux-x86/bin/versioner
+++ b/linux-x86/bin/versioner
Binary files differ
diff --git a/linux-x86/clang-headers b/linux-x86/clang-headers
index 1d9e782..e8f09e7 120000
--- a/linux-x86/clang-headers
+++ b/linux-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/11.0.1/include
\ No newline at end of file
+lib64/clang/11.0.5/include
\ No newline at end of file
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h
deleted file mode 100644
index 834a2e3..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h
+++ /dev/null
@@ -1,485 +0,0 @@
-/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG_CUDA_CMATH_H__
-#define __CLANG_CUDA_CMATH_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-#include <limits>
-
-// CUDA lets us use various std math functions on the device side.  This file
-// works in concert with __clang_cuda_math_forward_declares.h to make this work.
-//
-// Specifically, the forward-declares header declares __device__ overloads for
-// these functions in the global namespace, then pulls them into namespace std
-// with 'using' statements.  Then this file implements those functions, after
-// their implementations have been pulled in.
-//
-// It's important that we declare the functions in the global namespace and pull
-// them into namespace std with using statements, as opposed to simply declaring
-// these functions in namespace std, because our device functions need to
-// overload the standard library functions, which may be declared in the global
-// namespace or in std, depending on the degree of conformance of the stdlib
-// implementation.  Declaring in the global namespace and pulling into namespace
-// std covers all of the known knowns.
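-// Illustrative sketch of that pattern (editorial addition, not part of
-// the original header):
-//
-//   __device__ float sin(float);    // declared at global scope, then
-//   namespace std { using ::sin; }  // pulled into std
-//
-// so both std::sin(x) and plain sin(x) resolve to the __device__
-// overload wherever the host stdlib declared its version.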
-
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
-#else
-#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
-#endif
-
-// For C++17 we need to add the noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
-__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
-__DEVICE__ long abs(long __n) { return ::labs(__n); }
-__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
-__DEVICE__ double abs(double __x) { return ::fabs(__x); }
-#endif
-// TODO: remove once variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const float abs(const float __x) { return ::fabsf((float)__x); }
-__DEVICE__ const double abs(const double __x) { return ::fabs((double)__x); }
-#endif
-__DEVICE__ float acos(float __x) { return ::acosf(__x); }
-__DEVICE__ float asin(float __x) { return ::asinf(__x); }
-__DEVICE__ float atan(float __x) { return ::atanf(__x); }
-__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }
-__DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
-__DEVICE__ float cos(float __x) { return ::cosf(__x); }
-__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
-__DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) __NOEXCEPT { return ::fabsf(__x); }
-__DEVICE__ float floor(float __x) { return ::floorf(__x); }
-__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
-// TODO: remove when variant is supported
-#ifndef _OPENMP
-__DEVICE__ int fpclassify(float __x) {
-  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
-                              FP_ZERO, __x);
-}
-__DEVICE__ int fpclassify(double __x) {
-  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
-                              FP_ZERO, __x);
-}
-#endif
-__DEVICE__ float frexp(float __arg, int *__exp) {
-  return ::frexpf(__arg, __exp);
-}
-
-// For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows.
-#ifndef _MSC_VER
-__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
-__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
-__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
-// For inscrutable reasons, __finite(), the double-precision version of
-// __finitef, does not exist when compiling for macOS.  __isfinited is available
-// everywhere and is just as good.
-__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
-__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
-__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
-#endif
-
-__DEVICE__ bool isgreater(float __x, float __y) {
-  return __builtin_isgreater(__x, __y);
-}
-__DEVICE__ bool isgreater(double __x, double __y) {
-  return __builtin_isgreater(__x, __y);
-}
-__DEVICE__ bool isgreaterequal(float __x, float __y) {
-  return __builtin_isgreaterequal(__x, __y);
-}
-__DEVICE__ bool isgreaterequal(double __x, double __y) {
-  return __builtin_isgreaterequal(__x, __y);
-}
-__DEVICE__ bool isless(float __x, float __y) {
-  return __builtin_isless(__x, __y);
-}
-__DEVICE__ bool isless(double __x, double __y) {
-  return __builtin_isless(__x, __y);
-}
-__DEVICE__ bool islessequal(float __x, float __y) {
-  return __builtin_islessequal(__x, __y);
-}
-__DEVICE__ bool islessequal(double __x, double __y) {
-  return __builtin_islessequal(__x, __y);
-}
-__DEVICE__ bool islessgreater(float __x, float __y) {
-  return __builtin_islessgreater(__x, __y);
-}
-__DEVICE__ bool islessgreater(double __x, double __y) {
-  return __builtin_islessgreater(__x, __y);
-}
-__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
-__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
-__DEVICE__ bool isunordered(float __x, float __y) {
-  return __builtin_isunordered(__x, __y);
-}
-__DEVICE__ bool isunordered(double __x, double __y) {
-  return __builtin_isunordered(__x, __y);
-}
-__DEVICE__ float ldexp(float __arg, int __exp) {
-  return ::ldexpf(__arg, __exp);
-}
-__DEVICE__ float log(float __x) { return ::logf(__x); }
-__DEVICE__ float log10(float __x) { return ::log10f(__x); }
-__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
-__DEVICE__ float pow(float __base, float __exp) {
-  return ::powf(__base, __exp);
-}
-__DEVICE__ float pow(float __base, int __iexp) {
-  return ::powif(__base, __iexp);
-}
-__DEVICE__ double pow(double __base, int __iexp) {
-  return ::powi(__base, __iexp);
-}
-__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
-__DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); }
-__DEVICE__ float sin(float __x) { return ::sinf(__x); }
-__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
-__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
-__DEVICE__ float tan(float __x) { return ::tanf(__x); }
-__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
-
-// Notably missing above is nexttoward.  We omit it because
-// libdevice doesn't provide an implementation, and we don't want to be in the
-// business of implementing tricky libm functions in this header.
-
-// Now we've defined everything we promised we'd define in
-// __clang_cuda_math_forward_declares.h.  We need to do two additional things to
-// fix up our math functions.
-//
-// 1) Define __device__ overloads for e.g. sin(int).  The CUDA headers define
-//    only sin(float) and sin(double), which means that e.g. sin(0) is
-//    ambiguous.
-//
-// 2) Pull the __device__ overloads of "foobarf" math functions into namespace
-//    std.  These are defined in the CUDA headers in the global namespace,
-//    independent of everything else we've done here.
-
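-// For illustration, a hypothetical device caller (not part of this header):
-//
-//   __device__ double __example() {
-//     // Ambiguous with only sin(float) and sin(double) in scope; resolved
-//     // by the integral sin overload generated below, which returns double.
-//     return sin(0);
-//   }
-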
-// We can't use std::enable_if, because we want to be pre-C++11 compatible.  But
-// we go ahead and unconditionally define functions that are only available when
-// compiling for C++11 to match the behavior of the CUDA headers.
-template<bool __B, class __T = void>
-struct __clang_cuda_enable_if {};
-
-template <class __T> struct __clang_cuda_enable_if<true, __T> {
-  typedef __T type;
-};
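-
-// (Equivalent to C++11 std::enable_if: the primary template has no nested
-// ::type, so an overload whose return type names
-// __clang_cuda_enable_if<false, __T>::type drops out of overload resolution
-// via SFINAE instead of producing a hard error.)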
-
-// Defines an overload of __fn that accepts one integral argument, calls
-// __fn((double)x), and returns __retty.
-#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn)                      \
-  template <typename __T>                                                      \
-  __DEVICE__                                                                   \
-      typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,    \
-                                      __retty>::type                           \
-      __fn(__T __x) {                                                          \
-    return ::__fn((double)__x);                                                \
-  }
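-
-// For example, __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin) expands to
-// roughly:
-//
-//   template <typename __T>
-//   __DEVICE__ typename __clang_cuda_enable_if<
-//       std::numeric_limits<__T>::is_integer, double>::type
-//   sin(__T __x) {
-//     return ::sin((double)__x);
-//   }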
-
-// Defines an overload of __fn that accepts two arithmetic arguments, calls
-// __fn((double)x, (double)y), and returns __retty.
-//
-// Note this is different from OVERLOAD_1, which generates an overload that
-// accepts only *integral* arguments.
-#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn)                      \
-  template <typename __T1, typename __T2>                                      \
-  __DEVICE__ typename __clang_cuda_enable_if<                                  \
-      std::numeric_limits<__T1>::is_specialized &&                             \
-          std::numeric_limits<__T2>::is_specialized,                           \
-      __retty>::type                                                           \
-  __fn(__T1 __x, __T2 __y) {                                                   \
-    return __fn((double)__x, (double)__y);                                     \
-  }
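-
-// For example, after __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow) below,
-// a mixed-type call such as pow(2, 3.0f) is well-formed: both arguments are
-// converted to double and the call yields the double 8.0, matching the
-// promotion behavior of the host <cmath> overload set.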
-
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc);
-
-#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1
-#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2
-
-// Overloads for functions that don't match the patterns expected by
-// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2}.
-template <typename __T1, typename __T2, typename __T3>
-__DEVICE__ typename __clang_cuda_enable_if<
-    std::numeric_limits<__T1>::is_specialized &&
-        std::numeric_limits<__T2>::is_specialized &&
-        std::numeric_limits<__T3>::is_specialized,
-    double>::type
-fma(__T1 __x, __T2 __y, __T3 __z) {
-  return std::fma((double)__x, (double)__y, (double)__z);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-frexp(__T __x, int *__exp) {
-  return std::frexp((double)__x, __exp);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-ldexp(__T __x, int __exp) {
-  return std::ldexp((double)__x, __exp);
-}
-
-template <typename __T1, typename __T2>
-__DEVICE__ typename __clang_cuda_enable_if<
-    std::numeric_limits<__T1>::is_specialized &&
-        std::numeric_limits<__T2>::is_specialized,
-    double>::type
-remquo(__T1 __x, __T2 __y, int *__quo) {
-  return std::remquo((double)__x, (double)__y, __quo);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-scalbln(__T __x, long __exp) {
-  return std::scalbln((double)__x, __exp);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-scalbn(__T __x, int __exp) {
-  return std::scalbn((double)__x, __exp);
-}
-
-// We need to define these overloads in exactly the namespace our standard
-// library uses (including the right inline namespace), otherwise they won't be
-// picked up by other functions in the standard library (e.g. functions in
-// <complex>).  Thus the ugliness below.
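-// For example, with libc++ the macro below expands (by default) to
-//
-//   namespace std { inline namespace __1 {
-//
-// and std::complex's operators look up isnan and copysign unqualified from
-// inside std::__1, so our overloads must land in that exact namespace to be
-// found there.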
-#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
-_LIBCPP_BEGIN_NAMESPACE_STD
-#else
-namespace std {
-#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
-#endif
-#endif
-
-// Pull the new overloads we defined above into namespace std.
-using ::acos;
-using ::acosh;
-using ::asin;
-using ::asinh;
-using ::atan;
-using ::atan2;
-using ::atanh;
-using ::cbrt;
-using ::ceil;
-using ::copysign;
-using ::cos;
-using ::cosh;
-using ::erf;
-using ::erfc;
-using ::exp;
-using ::exp2;
-using ::expm1;
-using ::fabs;
-using ::fdim;
-using ::floor;
-using ::fma;
-using ::fmax;
-using ::fmin;
-using ::fmod;
-using ::fpclassify;
-using ::frexp;
-using ::hypot;
-using ::ilogb;
-using ::isfinite;
-using ::isgreater;
-using ::isgreaterequal;
-using ::isless;
-using ::islessequal;
-using ::islessgreater;
-using ::isnormal;
-using ::isunordered;
-using ::ldexp;
-using ::lgamma;
-using ::llrint;
-using ::llround;
-using ::log;
-using ::log10;
-using ::log1p;
-using ::log2;
-using ::logb;
-using ::lrint;
-using ::lround;
-using ::nearbyint;
-using ::nextafter;
-using ::pow;
-using ::remainder;
-using ::remquo;
-using ::rint;
-using ::round;
-using ::scalbln;
-using ::scalbn;
-using ::signbit;
-using ::sin;
-using ::sinh;
-using ::sqrt;
-using ::tan;
-using ::tanh;
-using ::tgamma;
-using ::trunc;
-
-// Well this is fun: We need to pull these symbols in for libc++, but we can't
-// pull them in with libstdc++, because its ::isinf and ::isnan are different
-// from its std::isinf and std::isnan: the former are the C library functions
-// returning int, while the latter are the C++ overloads returning bool.
-#ifndef __GLIBCXX__
-using ::isinf;
-using ::isnan;
-#endif
-
-// Finally, pull the "foobarf" functions that CUDA defines in its headers into
-// namespace std.
-using ::acosf;
-using ::acoshf;
-using ::asinf;
-using ::asinhf;
-using ::atan2f;
-using ::atanf;
-using ::atanhf;
-using ::cbrtf;
-using ::ceilf;
-using ::copysignf;
-using ::cosf;
-using ::coshf;
-using ::erfcf;
-using ::erff;
-using ::exp2f;
-using ::expf;
-using ::expm1f;
-using ::fabsf;
-using ::fdimf;
-using ::floorf;
-using ::fmaf;
-using ::fmaxf;
-using ::fminf;
-using ::fmodf;
-using ::frexpf;
-using ::hypotf;
-using ::ilogbf;
-using ::ldexpf;
-using ::lgammaf;
-using ::llrintf;
-using ::llroundf;
-using ::log10f;
-using ::log1pf;
-using ::log2f;
-using ::logbf;
-using ::logf;
-using ::lrintf;
-using ::lroundf;
-using ::modff;
-using ::nearbyintf;
-using ::nextafterf;
-using ::powf;
-using ::remainderf;
-using ::remquof;
-using ::rintf;
-using ::roundf;
-// TODO: remove once variant is supported
-#ifndef _OPENMP
-using ::scalblnf;
-#endif
-using ::scalbnf;
-using ::sinf;
-using ::sinhf;
-using ::sqrtf;
-using ::tanf;
-using ::tanhf;
-using ::tgammaf;
-using ::truncf;
-
-#ifdef _LIBCPP_END_NAMESPACE_STD
-_LIBCPP_END_NAMESPACE_STD
-#else
-#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
-_GLIBCXX_END_NAMESPACE_VERSION
-#endif
-} // namespace std
-#endif
-
-#undef __NOEXCEPT
-#undef __DEVICE__
-
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_complex_builtins.h b/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_complex_builtins.h
deleted file mode 100644
index 576a958..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_complex_builtins.h
+++ /dev/null
@@ -1,189 +0,0 @@
-/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_CUDA_COMPLEX_BUILTINS
-#define __CLANG_CUDA_COMPLEX_BUILTINS
-
-// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3.  These are
-// libgcc functions that clang assumes are available when compiling C99 complex
-// operations.  (These implementations come from libc++, and have been modified
-// to work with CUDA.)
-
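-// For illustration, a hypothetical device function (not part of this
-// header) whose complex multiply is what pulls __muldc3 in:
-//
-//   __device__ double _Complex __example(double _Complex __x,
-//                                        double _Complex __y) {
-//     return __x * __y; // lowered by clang to a call to __muldc3
-//   }
-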
-extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
-                                                      double __c, double __d) {
-  double __ac = __a * __c;
-  double __bd = __b * __d;
-  double __ad = __a * __d;
-  double __bc = __b * __c;
-  double _Complex z;
-  __real__(z) = __ac - __bd;
-  __imag__(z) = __ad + __bc;
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    int __recalc = 0;
-    if (std::isinf(__a) || std::isinf(__b)) {
-      __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (std::isinf(__c) || std::isinf(__d)) {
-      __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      __recalc = 1;
-    }
-    if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
-                      std::isinf(__ad) || std::isinf(__bc))) {
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (__recalc) {
-      // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
-      // a device overload (and isn't constexpr before C++11, naturally).
-      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
-    }
-  }
-  return z;
-}
-
-extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
-                                                     float __c, float __d) {
-  float __ac = __a * __c;
-  float __bd = __b * __d;
-  float __ad = __a * __d;
-  float __bc = __b * __c;
-  float _Complex z;
-  __real__(z) = __ac - __bd;
-  __imag__(z) = __ad + __bc;
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    int __recalc = 0;
-    if (std::isinf(__a) || std::isinf(__b)) {
-      __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (std::isinf(__c) || std::isinf(__d)) {
-      __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      __recalc = 1;
-    }
-    if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
-                      std::isinf(__ad) || std::isinf(__bc))) {
-      if (std::isnan(__a))
-        __a = std::copysign(0, __a);
-      if (std::isnan(__b))
-        __b = std::copysign(0, __b);
-      if (std::isnan(__c))
-        __c = std::copysign(0, __c);
-      if (std::isnan(__d))
-        __d = std::copysign(0, __d);
-      __recalc = 1;
-    }
-    if (__recalc) {
-      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
-    }
-  }
-  return z;
-}
-
-extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
-                                                      double __c, double __d) {
-  int __ilogbw = 0;
-  // Can't use std::max, because that's defined in <algorithm>, and we don't
-  // want to pull that in for every compile.  The CUDA headers define
-  // ::max(float, float) and ::max(double, double), which is sufficient for us.
-  double __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
-  if (std::isfinite(__logbw)) {
-    __ilogbw = (int)__logbw;
-    __c = std::scalbn(__c, -__ilogbw);
-    __d = std::scalbn(__d, -__ilogbw);
-  }
-  double __denom = __c * __c + __d * __d;
-  double _Complex z;
-  __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
-  __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    if ((__denom == 0.0) && (!std::isnan(__a) || !std::isnan(__b))) {
-      __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
-      __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
-    } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
-               std::isfinite(__d)) {
-      __a = std::copysign(std::isinf(__a) ? 1.0 : 0.0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1.0 : 0.0, __b);
-      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
-    } else if (std::isinf(__logbw) && __logbw > 0.0 && std::isfinite(__a) &&
-               std::isfinite(__b)) {
-      __c = std::copysign(std::isinf(__c) ? 1.0 : 0.0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1.0 : 0.0, __d);
-      __real__(z) = 0.0 * (__a * __c + __b * __d);
-      __imag__(z) = 0.0 * (__b * __c - __a * __d);
-    }
-  }
-  return z;
-}
-
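-// The scaling above is the standard guarded complex division: for
-// z = (a + b*i) / (c + d*i),
-//
-//   z = ((a*c + b*d) + (b*c - a*d)*i) / (c*c + d*d),
-//
-// except that c and d are first scaled by 2^-ilogbw, where
-// ilogbw = logb(max(|c|, |d|)), so that c*c + d*d cannot overflow; the
-// quotient then comes out scaled by 2^ilogbw, which the final
-// scalbn(..., -ilogbw) undoes.
-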
-extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
-                                                     float __c, float __d) {
-  int __ilogbw = 0;
-  float __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
-  if (std::isfinite(__logbw)) {
-    __ilogbw = (int)__logbw;
-    __c = std::scalbn(__c, -__ilogbw);
-    __d = std::scalbn(__d, -__ilogbw);
-  }
-  float __denom = __c * __c + __d * __d;
-  float _Complex z;
-  __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
-  __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
-  if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
-    if ((__denom == 0) && (!std::isnan(__a) || !std::isnan(__b))) {
-      __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
-      __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
-    } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
-               std::isfinite(__d)) {
-      __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
-      __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
-      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
-      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
-    } else if (std::isinf(__logbw) && __logbw > 0 && std::isfinite(__a) &&
-               std::isfinite(__b)) {
-      __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
-      __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
-      __real__(z) = 0 * (__a * __c + __b * __d);
-      __imag__(z) = 0 * (__b * __c - __a * __d);
-    }
-  }
-  return z;
-}
-
-#endif // __CLANG_CUDA_COMPLEX_BUILTINS
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h b/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h
deleted file mode 100644
index 50ad674..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h
+++ /dev/null
@@ -1,1793 +0,0 @@
-/*===---- __clang_cuda_device_functions.h - CUDA runtime support -----------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
-#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
-
-#ifndef _OPENMP
-#if CUDA_VERSION < 9000
-#error This file is intended to be used with CUDA-9+ only.
-#endif
-#endif
-
-// __DEVICE__ is a helper macro with a common set of attributes for the
-// wrappers we implement in this file. We need static in order to avoid
-// emitting unused functions, and __forceinline__ helps inline these wrappers
-// at -O1.
-#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
-#else
-#define __DEVICE__ static __device__ __forceinline__
-#endif
-
-// libdevice provides fast, low-precision and slow, full-precision
-// implementations for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__, which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals is in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
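-
-// Used further down in this file along the lines of (illustrative):
-//
-//   __DEVICE__ float sinf(float __a) {
-//     return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-//   }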
-
-// For C++17 we need to include the noexcept specifier to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
-__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
-__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
-__DEVICE__ unsigned int __brev(unsigned int __a) { return __nv_brev(__a); }
-__DEVICE__ unsigned long long __brevll(unsigned long long __a) {
-  return __nv_brevll(__a);
-}
-#if defined(__cplusplus)
-__DEVICE__ void __brkpt() { asm volatile("brkpt;"); }
-__DEVICE__ void __brkpt(int __a) { __brkpt(); }
-#else
-__DEVICE__ void __attribute__((overloadable)) __brkpt(void) {
-  asm volatile("brkpt;");
-}
-__DEVICE__ void __attribute__((overloadable)) __brkpt(int __a) { __brkpt(); }
-#endif
-__DEVICE__ unsigned int __byte_perm(unsigned int __a, unsigned int __b,
-                                    unsigned int __c) {
-  return __nv_byte_perm(__a, __b, __c);
-}
-__DEVICE__ int __clz(int __a) { return __nv_clz(__a); }
-__DEVICE__ int __clzll(long long __a) { return __nv_clzll(__a); }
-__DEVICE__ float __cosf(float __a) { return __nv_fast_cosf(__a); }
-__DEVICE__ double __dAtomicAdd(double *__p, double __v) {
-  return __nvvm_atom_add_gen_d(__p, __v);
-}
-__DEVICE__ double __dAtomicAdd_block(double *__p, double __v) {
-  return __nvvm_atom_cta_add_gen_d(__p, __v);
-}
-__DEVICE__ double __dAtomicAdd_system(double *__p, double __v) {
-  return __nvvm_atom_sys_add_gen_d(__p, __v);
-}
-__DEVICE__ double __dadd_rd(double __a, double __b) {
-  return __nv_dadd_rd(__a, __b);
-}
-__DEVICE__ double __dadd_rn(double __a, double __b) {
-  return __nv_dadd_rn(__a, __b);
-}
-__DEVICE__ double __dadd_ru(double __a, double __b) {
-  return __nv_dadd_ru(__a, __b);
-}
-__DEVICE__ double __dadd_rz(double __a, double __b) {
-  return __nv_dadd_rz(__a, __b);
-}
-__DEVICE__ double __ddiv_rd(double __a, double __b) {
-  return __nv_ddiv_rd(__a, __b);
-}
-__DEVICE__ double __ddiv_rn(double __a, double __b) {
-  return __nv_ddiv_rn(__a, __b);
-}
-__DEVICE__ double __ddiv_ru(double __a, double __b) {
-  return __nv_ddiv_ru(__a, __b);
-}
-__DEVICE__ double __ddiv_rz(double __a, double __b) {
-  return __nv_ddiv_rz(__a, __b);
-}
-__DEVICE__ double __dmul_rd(double __a, double __b) {
-  return __nv_dmul_rd(__a, __b);
-}
-__DEVICE__ double __dmul_rn(double __a, double __b) {
-  return __nv_dmul_rn(__a, __b);
-}
-__DEVICE__ double __dmul_ru(double __a, double __b) {
-  return __nv_dmul_ru(__a, __b);
-}
-__DEVICE__ double __dmul_rz(double __a, double __b) {
-  return __nv_dmul_rz(__a, __b);
-}
-__DEVICE__ float __double2float_rd(double __a) {
-  return __nv_double2float_rd(__a);
-}
-__DEVICE__ float __double2float_rn(double __a) {
-  return __nv_double2float_rn(__a);
-}
-__DEVICE__ float __double2float_ru(double __a) {
-  return __nv_double2float_ru(__a);
-}
-__DEVICE__ float __double2float_rz(double __a) {
-  return __nv_double2float_rz(__a);
-}
-__DEVICE__ int __double2hiint(double __a) { return __nv_double2hiint(__a); }
-__DEVICE__ int __double2int_rd(double __a) { return __nv_double2int_rd(__a); }
-__DEVICE__ int __double2int_rn(double __a) { return __nv_double2int_rn(__a); }
-__DEVICE__ int __double2int_ru(double __a) { return __nv_double2int_ru(__a); }
-__DEVICE__ int __double2int_rz(double __a) { return __nv_double2int_rz(__a); }
-__DEVICE__ long long __double2ll_rd(double __a) {
-  return __nv_double2ll_rd(__a);
-}
-__DEVICE__ long long __double2ll_rn(double __a) {
-  return __nv_double2ll_rn(__a);
-}
-__DEVICE__ long long __double2ll_ru(double __a) {
-  return __nv_double2ll_ru(__a);
-}
-__DEVICE__ long long __double2ll_rz(double __a) {
-  return __nv_double2ll_rz(__a);
-}
-__DEVICE__ int __double2loint(double __a) { return __nv_double2loint(__a); }
-__DEVICE__ unsigned int __double2uint_rd(double __a) {
-  return __nv_double2uint_rd(__a);
-}
-__DEVICE__ unsigned int __double2uint_rn(double __a) {
-  return __nv_double2uint_rn(__a);
-}
-__DEVICE__ unsigned int __double2uint_ru(double __a) {
-  return __nv_double2uint_ru(__a);
-}
-__DEVICE__ unsigned int __double2uint_rz(double __a) {
-  return __nv_double2uint_rz(__a);
-}
-__DEVICE__ unsigned long long __double2ull_rd(double __a) {
-  return __nv_double2ull_rd(__a);
-}
-__DEVICE__ unsigned long long __double2ull_rn(double __a) {
-  return __nv_double2ull_rn(__a);
-}
-__DEVICE__ unsigned long long __double2ull_ru(double __a) {
-  return __nv_double2ull_ru(__a);
-}
-__DEVICE__ unsigned long long __double2ull_rz(double __a) {
-  return __nv_double2ull_rz(__a);
-}
-__DEVICE__ long long __double_as_longlong(double __a) {
-  return __nv_double_as_longlong(__a);
-}
-__DEVICE__ double __drcp_rd(double __a) { return __nv_drcp_rd(__a); }
-__DEVICE__ double __drcp_rn(double __a) { return __nv_drcp_rn(__a); }
-__DEVICE__ double __drcp_ru(double __a) { return __nv_drcp_ru(__a); }
-__DEVICE__ double __drcp_rz(double __a) { return __nv_drcp_rz(__a); }
-__DEVICE__ double __dsqrt_rd(double __a) { return __nv_dsqrt_rd(__a); }
-__DEVICE__ double __dsqrt_rn(double __a) { return __nv_dsqrt_rn(__a); }
-__DEVICE__ double __dsqrt_ru(double __a) { return __nv_dsqrt_ru(__a); }
-__DEVICE__ double __dsqrt_rz(double __a) { return __nv_dsqrt_rz(__a); }
-__DEVICE__ double __dsub_rd(double __a, double __b) {
-  return __nv_dsub_rd(__a, __b);
-}
-__DEVICE__ double __dsub_rn(double __a, double __b) {
-  return __nv_dsub_rn(__a, __b);
-}
-__DEVICE__ double __dsub_ru(double __a, double __b) {
-  return __nv_dsub_ru(__a, __b);
-}
-__DEVICE__ double __dsub_rz(double __a, double __b) {
-  return __nv_dsub_rz(__a, __b);
-}
-__DEVICE__ float __exp10f(float __a) { return __nv_fast_exp10f(__a); }
-__DEVICE__ float __expf(float __a) { return __nv_fast_expf(__a); }
-__DEVICE__ float __fAtomicAdd(float *__p, float __v) {
-  return __nvvm_atom_add_gen_f(__p, __v);
-}
-__DEVICE__ float __fAtomicAdd_block(float *__p, float __v) {
-  return __nvvm_atom_cta_add_gen_f(__p, __v);
-}
-__DEVICE__ float __fAtomicAdd_system(float *__p, float __v) {
-  return __nvvm_atom_sys_add_gen_f(__p, __v);
-}
-__DEVICE__ float __fAtomicExch(float *__p, float __v) {
-  return __nv_int_as_float(
-      __nvvm_atom_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));
-}
-__DEVICE__ float __fAtomicExch_block(float *__p, float __v) {
-  return __nv_int_as_float(
-      __nvvm_atom_cta_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));
-}
-__DEVICE__ float __fAtomicExch_system(float *__p, float __v) {
-  return __nv_int_as_float(
-      __nvvm_atom_sys_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));
-}
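-// (The xchg intrinsic has no float flavor, so the exchanges above operate on
-// the bit pattern through the int intrinsic and convert back with
-// __nv_int_as_float.)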
-__DEVICE__ float __fadd_rd(float __a, float __b) {
-  return __nv_fadd_rd(__a, __b);
-}
-__DEVICE__ float __fadd_rn(float __a, float __b) {
-  return __nv_fadd_rn(__a, __b);
-}
-__DEVICE__ float __fadd_ru(float __a, float __b) {
-  return __nv_fadd_ru(__a, __b);
-}
-__DEVICE__ float __fadd_rz(float __a, float __b) {
-  return __nv_fadd_rz(__a, __b);
-}
-__DEVICE__ float __fdiv_rd(float __a, float __b) {
-  return __nv_fdiv_rd(__a, __b);
-}
-__DEVICE__ float __fdiv_rn(float __a, float __b) {
-  return __nv_fdiv_rn(__a, __b);
-}
-__DEVICE__ float __fdiv_ru(float __a, float __b) {
-  return __nv_fdiv_ru(__a, __b);
-}
-__DEVICE__ float __fdiv_rz(float __a, float __b) {
-  return __nv_fdiv_rz(__a, __b);
-}
-__DEVICE__ float __fdividef(float __a, float __b) {
-  return __nv_fast_fdividef(__a, __b);
-}
-__DEVICE__ int __ffs(int __a) { return __nv_ffs(__a); }
-__DEVICE__ int __ffsll(long long __a) { return __nv_ffsll(__a); }
-__DEVICE__ int __finite(double __a) { return __nv_isfinited(__a); }
-__DEVICE__ int __finitef(float __a) { return __nv_finitef(__a); }
-#ifdef _MSC_VER
-__DEVICE__ int __finitel(long double __a);
-#endif
-__DEVICE__ int __float2int_rd(float __a) { return __nv_float2int_rd(__a); }
-__DEVICE__ int __float2int_rn(float __a) { return __nv_float2int_rn(__a); }
-__DEVICE__ int __float2int_ru(float __a) { return __nv_float2int_ru(__a); }
-__DEVICE__ int __float2int_rz(float __a) { return __nv_float2int_rz(__a); }
-__DEVICE__ long long __float2ll_rd(float __a) { return __nv_float2ll_rd(__a); }
-__DEVICE__ long long __float2ll_rn(float __a) { return __nv_float2ll_rn(__a); }
-__DEVICE__ long long __float2ll_ru(float __a) { return __nv_float2ll_ru(__a); }
-__DEVICE__ long long __float2ll_rz(float __a) { return __nv_float2ll_rz(__a); }
-__DEVICE__ unsigned int __float2uint_rd(float __a) {
-  return __nv_float2uint_rd(__a);
-}
-__DEVICE__ unsigned int __float2uint_rn(float __a) {
-  return __nv_float2uint_rn(__a);
-}
-__DEVICE__ unsigned int __float2uint_ru(float __a) {
-  return __nv_float2uint_ru(__a);
-}
-__DEVICE__ unsigned int __float2uint_rz(float __a) {
-  return __nv_float2uint_rz(__a);
-}
-__DEVICE__ unsigned long long __float2ull_rd(float __a) {
-  return __nv_float2ull_rd(__a);
-}
-__DEVICE__ unsigned long long __float2ull_rn(float __a) {
-  return __nv_float2ull_rn(__a);
-}
-__DEVICE__ unsigned long long __float2ull_ru(float __a) {
-  return __nv_float2ull_ru(__a);
-}
-__DEVICE__ unsigned long long __float2ull_rz(float __a) {
-  return __nv_float2ull_rz(__a);
-}
-__DEVICE__ int __float_as_int(float __a) { return __nv_float_as_int(__a); }
-__DEVICE__ unsigned int __float_as_uint(float __a) {
-  return __nv_float_as_uint(__a);
-}
-__DEVICE__ double __fma_rd(double __a, double __b, double __c) {
-  return __nv_fma_rd(__a, __b, __c);
-}
-__DEVICE__ double __fma_rn(double __a, double __b, double __c) {
-  return __nv_fma_rn(__a, __b, __c);
-}
-__DEVICE__ double __fma_ru(double __a, double __b, double __c) {
-  return __nv_fma_ru(__a, __b, __c);
-}
-__DEVICE__ double __fma_rz(double __a, double __b, double __c) {
-  return __nv_fma_rz(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_ieee_rd(float __a, float __b, float __c) {
-  return __nv_fmaf_ieee_rd(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_ieee_rn(float __a, float __b, float __c) {
-  return __nv_fmaf_ieee_rn(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_ieee_ru(float __a, float __b, float __c) {
-  return __nv_fmaf_ieee_ru(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_ieee_rz(float __a, float __b, float __c) {
-  return __nv_fmaf_ieee_rz(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_rd(float __a, float __b, float __c) {
-  return __nv_fmaf_rd(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_rn(float __a, float __b, float __c) {
-  return __nv_fmaf_rn(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_ru(float __a, float __b, float __c) {
-  return __nv_fmaf_ru(__a, __b, __c);
-}
-__DEVICE__ float __fmaf_rz(float __a, float __b, float __c) {
-  return __nv_fmaf_rz(__a, __b, __c);
-}
-__DEVICE__ float __fmul_rd(float __a, float __b) {
-  return __nv_fmul_rd(__a, __b);
-}
-__DEVICE__ float __fmul_rn(float __a, float __b) {
-  return __nv_fmul_rn(__a, __b);
-}
-__DEVICE__ float __fmul_ru(float __a, float __b) {
-  return __nv_fmul_ru(__a, __b);
-}
-__DEVICE__ float __fmul_rz(float __a, float __b) {
-  return __nv_fmul_rz(__a, __b);
-}
-__DEVICE__ float __frcp_rd(float __a) { return __nv_frcp_rd(__a); }
-__DEVICE__ float __frcp_rn(float __a) { return __nv_frcp_rn(__a); }
-__DEVICE__ float __frcp_ru(float __a) { return __nv_frcp_ru(__a); }
-__DEVICE__ float __frcp_rz(float __a) { return __nv_frcp_rz(__a); }
-__DEVICE__ float __frsqrt_rn(float __a) { return __nv_frsqrt_rn(__a); }
-__DEVICE__ float __fsqrt_rd(float __a) { return __nv_fsqrt_rd(__a); }
-__DEVICE__ float __fsqrt_rn(float __a) { return __nv_fsqrt_rn(__a); }
-__DEVICE__ float __fsqrt_ru(float __a) { return __nv_fsqrt_ru(__a); }
-__DEVICE__ float __fsqrt_rz(float __a) { return __nv_fsqrt_rz(__a); }
-__DEVICE__ float __fsub_rd(float __a, float __b) {
-  return __nv_fsub_rd(__a, __b);
-}
-__DEVICE__ float __fsub_rn(float __a, float __b) {
-  return __nv_fsub_rn(__a, __b);
-}
-__DEVICE__ float __fsub_ru(float __a, float __b) {
-  return __nv_fsub_ru(__a, __b);
-}
-__DEVICE__ float __fsub_rz(float __a, float __b) {
-  return __nv_fsub_rz(__a, __b);
-}
-__DEVICE__ int __hadd(int __a, int __b) { return __nv_hadd(__a, __b); }
-__DEVICE__ double __hiloint2double(int __a, int __b) {
-  return __nv_hiloint2double(__a, __b);
-}
-__DEVICE__ int __iAtomicAdd(int *__p, int __v) {
-  return __nvvm_atom_add_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {
-  return __nvvm_atom_cta_add_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {
-  return __nvvm_atom_sys_add_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicAnd(int *__p, int __v) {
-  return __nvvm_atom_and_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicAnd_block(int *__p, int __v) {
-  return __nvvm_atom_cta_and_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicAnd_system(int *__p, int __v) {
-  return __nvvm_atom_sys_and_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicCAS(int *__p, int __cmp, int __v) {
-  return __nvvm_atom_cas_gen_i(__p, __cmp, __v);
-}
-__DEVICE__ int __iAtomicCAS_block(int *__p, int __cmp, int __v) {
-  return __nvvm_atom_cta_cas_gen_i(__p, __cmp, __v);
-}
-__DEVICE__ int __iAtomicCAS_system(int *__p, int __cmp, int __v) {
-  return __nvvm_atom_sys_cas_gen_i(__p, __cmp, __v);
-}
-__DEVICE__ int __iAtomicExch(int *__p, int __v) {
-  return __nvvm_atom_xchg_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicExch_block(int *__p, int __v) {
-  return __nvvm_atom_cta_xchg_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicExch_system(int *__p, int __v) {
-  return __nvvm_atom_sys_xchg_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicMax(int *__p, int __v) {
-  return __nvvm_atom_max_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicMax_block(int *__p, int __v) {
-  return __nvvm_atom_cta_max_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicMax_system(int *__p, int __v) {
-  return __nvvm_atom_sys_max_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicMin(int *__p, int __v) {
-  return __nvvm_atom_min_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicMin_block(int *__p, int __v) {
-  return __nvvm_atom_cta_min_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicMin_system(int *__p, int __v) {
-  return __nvvm_atom_sys_min_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicOr(int *__p, int __v) {
-  return __nvvm_atom_or_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicOr_block(int *__p, int __v) {
-  return __nvvm_atom_cta_or_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicOr_system(int *__p, int __v) {
-  return __nvvm_atom_sys_or_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicXor(int *__p, int __v) {
-  return __nvvm_atom_xor_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicXor_block(int *__p, int __v) {
-  return __nvvm_atom_cta_xor_gen_i(__p, __v);
-}
-__DEVICE__ int __iAtomicXor_system(int *__p, int __v) {
-  return __nvvm_atom_sys_xor_gen_i(__p, __v);
-}
-__DEVICE__ long long __illAtomicMax(long long *__p, long long __v) {
-  return __nvvm_atom_max_gen_ll(__p, __v);
-}
-__DEVICE__ long long __illAtomicMax_block(long long *__p, long long __v) {
-  return __nvvm_atom_cta_max_gen_ll(__p, __v);
-}
-__DEVICE__ long long __illAtomicMax_system(long long *__p, long long __v) {
-  return __nvvm_atom_sys_max_gen_ll(__p, __v);
-}
-__DEVICE__ long long __illAtomicMin(long long *__p, long long __v) {
-  return __nvvm_atom_min_gen_ll(__p, __v);
-}
-__DEVICE__ long long __illAtomicMin_block(long long *__p, long long __v) {
-  return __nvvm_atom_cta_min_gen_ll(__p, __v);
-}
-__DEVICE__ long long __illAtomicMin_system(long long *__p, long long __v) {
-  return __nvvm_atom_sys_min_gen_ll(__p, __v);
-}
-__DEVICE__ double __int2double_rn(int __a) { return __nv_int2double_rn(__a); }
-__DEVICE__ float __int2float_rd(int __a) { return __nv_int2float_rd(__a); }
-__DEVICE__ float __int2float_rn(int __a) { return __nv_int2float_rn(__a); }
-__DEVICE__ float __int2float_ru(int __a) { return __nv_int2float_ru(__a); }
-__DEVICE__ float __int2float_rz(int __a) { return __nv_int2float_rz(__a); }
-__DEVICE__ float __int_as_float(int __a) { return __nv_int_as_float(__a); }
-__DEVICE__ int __isfinited(double __a) { return __nv_isfinited(__a); }
-__DEVICE__ int __isinf(double __a) { return __nv_isinfd(__a); }
-__DEVICE__ int __isinff(float __a) { return __nv_isinff(__a); }
-#ifdef _MSC_VER
-__DEVICE__ int __isinfl(long double __a);
-#endif
-__DEVICE__ int __isnan(double __a) { return __nv_isnand(__a); }
-__DEVICE__ int __isnanf(float __a) { return __nv_isnanf(__a); }
-#ifdef _MSC_VER
-__DEVICE__ int __isnanl(long double __a);
-#endif
-__DEVICE__ double __ll2double_rd(long long __a) {
-  return __nv_ll2double_rd(__a);
-}
-__DEVICE__ double __ll2double_rn(long long __a) {
-  return __nv_ll2double_rn(__a);
-}
-__DEVICE__ double __ll2double_ru(long long __a) {
-  return __nv_ll2double_ru(__a);
-}
-__DEVICE__ double __ll2double_rz(long long __a) {
-  return __nv_ll2double_rz(__a);
-}
-__DEVICE__ float __ll2float_rd(long long __a) { return __nv_ll2float_rd(__a); }
-__DEVICE__ float __ll2float_rn(long long __a) { return __nv_ll2float_rn(__a); }
-__DEVICE__ float __ll2float_ru(long long __a) { return __nv_ll2float_ru(__a); }
-__DEVICE__ float __ll2float_rz(long long __a) { return __nv_ll2float_rz(__a); }
-__DEVICE__ long long __llAtomicAnd(long long *__p, long long __v) {
-  return __nvvm_atom_and_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicAnd_block(long long *__p, long long __v) {
-  return __nvvm_atom_cta_and_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicAnd_system(long long *__p, long long __v) {
-  return __nvvm_atom_sys_and_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicOr(long long *__p, long long __v) {
-  return __nvvm_atom_or_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicOr_block(long long *__p, long long __v) {
-  return __nvvm_atom_cta_or_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicOr_system(long long *__p, long long __v) {
-  return __nvvm_atom_sys_or_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicXor(long long *__p, long long __v) {
-  return __nvvm_atom_xor_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicXor_block(long long *__p, long long __v) {
-  return __nvvm_atom_cta_xor_gen_ll(__p, __v);
-}
-__DEVICE__ long long __llAtomicXor_system(long long *__p, long long __v) {
-  return __nvvm_atom_sys_xor_gen_ll(__p, __v);
-}
-__DEVICE__ float __log10f(float __a) { return __nv_fast_log10f(__a); }
-__DEVICE__ float __log2f(float __a) { return __nv_fast_log2f(__a); }
-__DEVICE__ float __logf(float __a) { return __nv_fast_logf(__a); }
-__DEVICE__ double __longlong_as_double(long long __a) {
-  return __nv_longlong_as_double(__a);
-}
-__DEVICE__ int __mul24(int __a, int __b) { return __nv_mul24(__a, __b); }
-__DEVICE__ long long __mul64hi(long long __a, long long __b) {
-  return __nv_mul64hi(__a, __b);
-}
-__DEVICE__ int __mulhi(int __a, int __b) { return __nv_mulhi(__a, __b); }
-__DEVICE__ unsigned int __pm0(void) { return __nvvm_read_ptx_sreg_pm0(); }
-__DEVICE__ unsigned int __pm1(void) { return __nvvm_read_ptx_sreg_pm1(); }
-__DEVICE__ unsigned int __pm2(void) { return __nvvm_read_ptx_sreg_pm2(); }
-__DEVICE__ unsigned int __pm3(void) { return __nvvm_read_ptx_sreg_pm3(); }
-__DEVICE__ int __popc(int __a) { return __nv_popc(__a); }
-__DEVICE__ int __popcll(long long __a) { return __nv_popcll(__a); }
-__DEVICE__ float __powf(float __a, float __b) {
-  return __nv_fast_powf(__a, __b);
-}
-
-// Parameter must have a known integer value.
-#define __prof_trigger(__a) asm __volatile__("pmevent \t%0;" ::"i"(__a))
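-// For example, __prof_trigger(0) raises performance-monitor event 0; the
-// "i" asm constraint is what requires the argument to be a compile-time
-// integer constant.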
-__DEVICE__ int __rhadd(int __a, int __b) { return __nv_rhadd(__a, __b); }
-__DEVICE__ unsigned int __sad(int __a, int __b, unsigned int __c) {
-  return __nv_sad(__a, __b, __c);
-}
-__DEVICE__ float __saturatef(float __a) { return __nv_saturatef(__a); }
-__DEVICE__ int __signbitd(double __a) { return __nv_signbitd(__a); }
-__DEVICE__ int __signbitf(float __a) { return __nv_signbitf(__a); }
-__DEVICE__ void __sincosf(float __a, float *__s, float *__c) {
-  return __nv_fast_sincosf(__a, __s, __c);
-}
-__DEVICE__ float __sinf(float __a) { return __nv_fast_sinf(__a); }
-__DEVICE__ int __syncthreads_and(int __a) { return __nvvm_bar0_and(__a); }
-__DEVICE__ int __syncthreads_count(int __a) { return __nvvm_bar0_popc(__a); }
-__DEVICE__ int __syncthreads_or(int __a) { return __nvvm_bar0_or(__a); }
-__DEVICE__ float __tanf(float __a) { return __nv_fast_tanf(__a); }
-__DEVICE__ void __threadfence(void) { __nvvm_membar_gl(); }
-__DEVICE__ void __threadfence_block(void) { __nvvm_membar_cta(); }
-__DEVICE__ void __threadfence_system(void) { __nvvm_membar_sys(); }
-__DEVICE__ void __trap(void) { asm volatile("trap;"); }
-__DEVICE__ unsigned int __uAtomicAdd(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_add_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicAdd_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_add_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicAdd_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_add_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicAnd(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_and_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicAnd_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_and_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicAnd_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_and_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicCAS(unsigned int *__p, unsigned int __cmp,
-                                     unsigned int __v) {
-  return __nvvm_atom_cas_gen_i((int *)__p, __cmp, __v);
-}
-__DEVICE__ unsigned int
-__uAtomicCAS_block(unsigned int *__p, unsigned int __cmp, unsigned int __v) {
-  return __nvvm_atom_cta_cas_gen_i((int *)__p, __cmp, __v);
-}
-__DEVICE__ unsigned int
-__uAtomicCAS_system(unsigned int *__p, unsigned int __cmp, unsigned int __v) {
-  return __nvvm_atom_sys_cas_gen_i((int *)__p, __cmp, __v);
-}
-__DEVICE__ unsigned int __uAtomicDec(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_dec_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicDec_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_dec_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicDec_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_dec_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicExch(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_xchg_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicExch_block(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_cta_xchg_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicExch_system(unsigned int *__p,
-                                             unsigned int __v) {
-  return __nvvm_atom_sys_xchg_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicInc(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_inc_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicInc_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_inc_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicInc_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_inc_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicMax(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_max_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicMax_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_max_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicMax_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_max_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicMin(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_min_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicMin_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_min_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicMin_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_min_gen_ui(__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicOr(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_or_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicOr_block(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_cta_or_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicOr_system(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_sys_or_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicXor(unsigned int *__p, unsigned int __v) {
-  return __nvvm_atom_xor_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicXor_block(unsigned int *__p,
-                                           unsigned int __v) {
-  return __nvvm_atom_cta_xor_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uAtomicXor_system(unsigned int *__p,
-                                            unsigned int __v) {
-  return __nvvm_atom_sys_xor_gen_i((int *)__p, __v);
-}
-__DEVICE__ unsigned int __uhadd(unsigned int __a, unsigned int __b) {
-  return __nv_uhadd(__a, __b);
-}
-__DEVICE__ double __uint2double_rn(unsigned int __a) {
-  return __nv_uint2double_rn(__a);
-}
-__DEVICE__ float __uint2float_rd(unsigned int __a) {
-  return __nv_uint2float_rd(__a);
-}
-__DEVICE__ float __uint2float_rn(unsigned int __a) {
-  return __nv_uint2float_rn(__a);
-}
-__DEVICE__ float __uint2float_ru(unsigned int __a) {
-  return __nv_uint2float_ru(__a);
-}
-__DEVICE__ float __uint2float_rz(unsigned int __a) {
-  return __nv_uint2float_rz(__a);
-}
-__DEVICE__ float __uint_as_float(unsigned int __a) {
-  return __nv_uint_as_float(__a);
-}
-__DEVICE__ double __ull2double_rd(unsigned long long __a) {
-  return __nv_ull2double_rd(__a);
-}
-__DEVICE__ double __ull2double_rn(unsigned long long __a) {
-  return __nv_ull2double_rn(__a);
-}
-__DEVICE__ double __ull2double_ru(unsigned long long __a) {
-  return __nv_ull2double_ru(__a);
-}
-__DEVICE__ double __ull2double_rz(unsigned long long __a) {
-  return __nv_ull2double_rz(__a);
-}
-__DEVICE__ float __ull2float_rd(unsigned long long __a) {
-  return __nv_ull2float_rd(__a);
-}
-__DEVICE__ float __ull2float_rn(unsigned long long __a) {
-  return __nv_ull2float_rn(__a);
-}
-__DEVICE__ float __ull2float_ru(unsigned long long __a) {
-  return __nv_ull2float_ru(__a);
-}
-__DEVICE__ float __ull2float_rz(unsigned long long __a) {
-  return __nv_ull2float_rz(__a);
-}
-__DEVICE__ unsigned long long __ullAtomicAdd(unsigned long long *__p,
-                                             unsigned long long __v) {
-  return __nvvm_atom_add_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicAdd_block(unsigned long long *__p,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_cta_add_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicAdd_system(unsigned long long *__p,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_sys_add_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicAnd(unsigned long long *__p,
-                                             unsigned long long __v) {
-  return __nvvm_atom_and_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicAnd_block(unsigned long long *__p,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_cta_and_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicAnd_system(unsigned long long *__p,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_sys_and_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicCAS(unsigned long long *__p,
-                                             unsigned long long __cmp,
-                                             unsigned long long __v) {
-  return __nvvm_atom_cas_gen_ll((long long *)__p, __cmp, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicCAS_block(unsigned long long *__p,
-                                                   unsigned long long __cmp,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_cta_cas_gen_ll((long long *)__p, __cmp, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicCAS_system(unsigned long long *__p,
-                                                    unsigned long long __cmp,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_sys_cas_gen_ll((long long *)__p, __cmp, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicExch(unsigned long long *__p,
-                                              unsigned long long __v) {
-  return __nvvm_atom_xchg_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicExch_block(unsigned long long *__p,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_cta_xchg_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicExch_system(unsigned long long *__p,
-                                                     unsigned long long __v) {
-  return __nvvm_atom_sys_xchg_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicMax(unsigned long long *__p,
-                                             unsigned long long __v) {
-  return __nvvm_atom_max_gen_ull(__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicMax_block(unsigned long long *__p,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_cta_max_gen_ull(__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicMax_system(unsigned long long *__p,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_sys_max_gen_ull(__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicMin(unsigned long long *__p,
-                                             unsigned long long __v) {
-  return __nvvm_atom_min_gen_ull(__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicMin_block(unsigned long long *__p,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_cta_min_gen_ull(__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicMin_system(unsigned long long *__p,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_sys_min_gen_ull(__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicOr(unsigned long long *__p,
-                                            unsigned long long __v) {
-  return __nvvm_atom_or_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicOr_block(unsigned long long *__p,
-                                                  unsigned long long __v) {
-  return __nvvm_atom_cta_or_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicOr_system(unsigned long long *__p,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_sys_or_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicXor(unsigned long long *__p,
-                                             unsigned long long __v) {
-  return __nvvm_atom_xor_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicXor_block(unsigned long long *__p,
-                                                   unsigned long long __v) {
-  return __nvvm_atom_cta_xor_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned long long __ullAtomicXor_system(unsigned long long *__p,
-                                                    unsigned long long __v) {
-  return __nvvm_atom_sys_xor_gen_ll((long long *)__p, __v);
-}
-__DEVICE__ unsigned int __umul24(unsigned int __a, unsigned int __b) {
-  return __nv_umul24(__a, __b);
-}
-__DEVICE__ unsigned long long __umul64hi(unsigned long long __a,
-                                         unsigned long long __b) {
-  return __nv_umul64hi(__a, __b);
-}
-__DEVICE__ unsigned int __umulhi(unsigned int __a, unsigned int __b) {
-  return __nv_umulhi(__a, __b);
-}
-__DEVICE__ unsigned int __urhadd(unsigned int __a, unsigned int __b) {
-  return __nv_urhadd(__a, __b);
-}
-__DEVICE__ unsigned int __usad(unsigned int __a, unsigned int __b,
-                               unsigned int __c) {
-  return __nv_usad(__a, __b, __c);
-}
-
-#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020
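-// SIMD-in-a-word helpers: the __v*2 variants operate on two packed 16-bit
-// lanes of a 32-bit word and the __v*4 variants on four packed 8-bit lanes;
-// an "s"/"u" in the name marks the signed/unsigned flavor and "ss"/"us" the
-// saturating one.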
-__DEVICE__ unsigned int __vabs2(unsigned int __a) { return __nv_vabs2(__a); }
-__DEVICE__ unsigned int __vabs4(unsigned int __a) { return __nv_vabs4(__a); }
-__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) {
-  return __nv_vabsdiffs2(__a, __b);
-}
-__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) {
-  return __nv_vabsdiffs4(__a, __b);
-}
-__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) {
-  return __nv_vabsdiffu2(__a, __b);
-}
-__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) {
-  return __nv_vabsdiffu4(__a, __b);
-}
-__DEVICE__ unsigned int __vabsss2(unsigned int __a) {
-  return __nv_vabsss2(__a);
-}
-__DEVICE__ unsigned int __vabsss4(unsigned int __a) {
-  return __nv_vabsss4(__a);
-}
-__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) {
-  return __nv_vadd2(__a, __b);
-}
-__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) {
-  return __nv_vadd4(__a, __b);
-}
-__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) {
-  return __nv_vaddss2(__a, __b);
-}
-__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) {
-  return __nv_vaddss4(__a, __b);
-}
-__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) {
-  return __nv_vaddus2(__a, __b);
-}
-__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) {
-  return __nv_vaddus4(__a, __b);
-}
-__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) {
-  return __nv_vavgs2(__a, __b);
-}
-__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) {
-  return __nv_vavgs4(__a, __b);
-}
-__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) {
-  return __nv_vavgu2(__a, __b);
-}
-__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) {
-  return __nv_vavgu4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpeq2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpeq4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpges2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpges4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpgeu2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpgeu4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpgts2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpgts4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpgtu2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpgtu4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmples2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmples4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpleu2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpleu4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmplts2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmplts4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpltu2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpltu4(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpne2(__a, __b);
-}
-__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) {
-  return __nv_vcmpne4(__a, __b);
-}
-__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) {
-  return __nv_vhaddu2(__a, __b);
-}
-__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) {
-  return __nv_vhaddu4(__a, __b);
-}
-__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) {
-  return __nv_vmaxs2(__a, __b);
-}
-__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) {
-  return __nv_vmaxs4(__a, __b);
-}
-__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) {
-  return __nv_vmaxu2(__a, __b);
-}
-__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) {
-  return __nv_vmaxu4(__a, __b);
-}
-__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) {
-  return __nv_vmins2(__a, __b);
-}
-__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) {
-  return __nv_vmins4(__a, __b);
-}
-__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) {
-  return __nv_vminu2(__a, __b);
-}
-__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) {
-  return __nv_vminu4(__a, __b);
-}
-__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __nv_vneg2(__a); }
-__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __nv_vneg4(__a); }
-__DEVICE__ unsigned int __vnegss2(unsigned int __a) {
-  return __nv_vnegss2(__a);
-}
-__DEVICE__ unsigned int __vnegss4(unsigned int __a) {
-  return __nv_vnegss4(__a);
-}
-__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) {
-  return __nv_vsads2(__a, __b);
-}
-__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) {
-  return __nv_vsads4(__a, __b);
-}
-__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) {
-  return __nv_vsadu2(__a, __b);
-}
-__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) {
-  return __nv_vsadu4(__a, __b);
-}
-__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) {
-  return __nv_vseteq2(__a, __b);
-}
-__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) {
-  return __nv_vseteq4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetges2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetges4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetgeu2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetgeu4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetgts2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetgts4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetgtu2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetgtu4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetles2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetles4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetleu2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetleu4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetlts2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetlts4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetltu2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetltu4(__a, __b);
-}
-__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) {
-  return __nv_vsetne2(__a, __b);
-}
-__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) {
-  return __nv_vsetne4(__a, __b);
-}
-__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) {
-  return __nv_vsub2(__a, __b);
-}
-__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) {
-  return __nv_vsub4(__a, __b);
-}
-__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) {
-  return __nv_vsubss2(__a, __b);
-}
-__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) {
-  return __nv_vsubss4(__a, __b);
-}
-__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) {
-  return __nv_vsubus2(__a, __b);
-}
-__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
-  return __nv_vsubus4(__a, __b);
-}
-#else // CUDA_VERSION >= 9020
-// CUDA no longer provides inline assembly (or bitcode) implementations of
-// these functions, so we have to reimplement them. The implementations are
-// naive and not optimized for performance.
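-// Each function below wraps a single PTX SIMD-video instruction in inline
-// asm. The %3 operand is the instruction's third source input (used by the
-// accumulating/merge forms); these wrappers pass 0 for it.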
-
-// Helper function to convert N-bit boolean subfields into all-0 or all-1.
-// E.g. __bool2mask(0x01000100,8) -> 0xff00ff00
-//      __bool2mask(0x00010000,16) -> 0xffff0000
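-// The trick: (__a << __shift) - __a == __a * ((1 << __shift) - 1), which
-// turns each 0/1 subfield into a run of __shift zeros or ones.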
-__DEVICE__ unsigned int __bool2mask(unsigned int __a, int __shift) {
-  return (__a << __shift) - __a;
-}
-__DEVICE__ unsigned int __vabs2(unsigned int __a) {
-  unsigned int r;
-  asm("vabsdiff2.s32.s32.s32 %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(0), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vabs4(unsigned int __a) {
-  unsigned int r;
-  asm("vabsdiff4.s32.s32.s32 %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(0), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff2.s32.s32.s32 %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-
-__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff4.s32.s32.s32 %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff2.u32.u32.u32 %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff4.u32.u32.u32 %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vabsss2(unsigned int __a) {
-  unsigned int r;
-  asm("vabsdiff2.s32.s32.s32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(0), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vabsss4(unsigned int __a) {
-  unsigned int r;
-  asm("vabsdiff4.s32.s32.s32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(0), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vadd2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vadd4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vadd2.s32.s32.s32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vadd4.s32.s32.s32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vadd2.u32.u32.u32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vadd4.u32.u32.u32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vavrg2.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vavrg4.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vavrg2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vavrg4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
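-// Each comparison comes in two flavors: __vset* yields 0/1 per sub-element,
-// and the corresponding __vcmp* widens that to a per-element 0x0000/0xffff
-// (or 0x00/0xff) mask via __bool2mask.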
-__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.u32.u32.eq %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vseteq2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.u32.u32.eq %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vseteq4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.s32.s32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetges2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.s32.s32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetges4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.u32.u32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetgeu2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.u32.u32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetgeu4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.s32.s32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetgts2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.s32.s32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetgts4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.u32.u32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetgtu2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.u32.u32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetgtu4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.s32.s32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetles2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.s32.s32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetles4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.u32.u32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetleu2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.u32.u32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetleu4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.s32.s32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetlts2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.s32.s32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetlts4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.u32.u32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetltu2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.u32.u32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetltu4(__a, __b), 8);
-}
-__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset2.u32.u32.ne %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetne2(__a, __b), 16);
-}
-__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vset4.u32.u32.ne %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) {
-  return __bool2mask(__vsetne4(__a, __b), 8);
-}
-
-// Based on ITEM 23 in AIM-239: http://dspace.mit.edu/handle/1721.1/6086
-// (a & b) + (a | b) = a + b = (a ^ b) + 2 * (a & b) =>
-// (a + b) / 2 = ((a ^ b) >> 1) + (a & b)
-// To operate on multiple sub-elements we need to make sure to mask out bits
-// that crossed over into adjacent elements during the shift.
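-// E.g. __vhaddu2(0x00030001, 0x00010001) == 0x00020001 ((3+1)/2, (1+1)/2).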
-__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) {
-  return (((__a ^ __b) >> 1) & ~0x80008000u) + (__a & __b);
-}
-__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) {
-  return (((__a ^ __b) >> 1) & ~0x80808080u) + (__a & __b);
-}
-
-__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  if ((__a & 0x8000) && (__b & 0x8000)) {
-    // Work around a bug in ptxas which produces invalid result if low element
-    // is negative.
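-    // Branchless select: mask is all-ones in the lanes where __a > __b.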
-    unsigned mask = __vcmpgts2(__a, __b);
-    r = (__a & mask) | (__b & ~mask);
-  } else {
-    asm("vmax2.s32.s32.s32 %0,%1,%2,%3;"
-        : "=r"(r)
-        : "r"(__a), "r"(__b), "r"(0));
-  }
-  return r;
-}
-__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmax4.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmax2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmax4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmin2.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmin4.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmin2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vmin4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff2.s32.s32.s32.add %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff4.s32.s32.s32.add %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff2.u32.u32.u32.add %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vabsdiff4.u32.u32.u32.add %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-
-__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vsub2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __vsub2(0, __a); }
-
-__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vsub4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __vsub4(0, __a); }
-__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vsub2.s32.s32.s32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vnegss2(unsigned int __a) {
-  return __vsubss2(0, __a);
-}
-__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vsub4.s32.s32.s32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vnegss4(unsigned int __a) {
-  return __vsubss4(0, __a);
-}
-__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vsub2.u32.u32.u32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
-  unsigned int r;
-  asm("vsub4.u32.u32.u32.sat %0,%1,%2,%3;"
-      : "=r"(r)
-      : "r"(__a), "r"(__b), "r"(0));
-  return r;
-}
-#endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-#ifndef _OPENMP
-__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
-__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
-#endif
-__DEVICE__ double copysign(double __a, double __b) {
-  return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
-  return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
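-// __FAST_OR_SLOW(fast, slow) picks the __nv_fast_* variant when CUDA fast
-// math is in effect and the precise variant otherwise (it is set up earlier
-// in this header).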
-__DEVICE__ float cosf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
-  return __nv_fast_fdividef(__a, __b);
-#else
-  return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
-  return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
-  return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); }
-#else
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); }
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
-  return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
-  return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
-#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
-// These functions shouldn't be declared when this header is included only
-// for math-function resolution purposes (the OpenMP case).
-#ifndef _OPENMP
-__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
-  return __builtin_memcpy(__a, __b, __c);
-}
-__DEVICE__ void *memset(void *__a, int __b, size_t __c) {
-  return __builtin_memset(__a, __b, __c);
-}
-#endif
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
-  return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
-  return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
-  return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
-  return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
-  return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
-  return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
-  return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
-  return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
-  return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
-  return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
-  return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
-  return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
-  return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
-  return __nv_rhypotf(__a, __b);
-}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
-  return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
-  return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
-  return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
-  return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
-  return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
-  return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-// TODO: remove once OpenMP 'declare variant' is supported.
-#ifndef _OPENMP
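-// scalbn takes an int exponent, so out-of-range long exponents are clamped:
-// scaling any finite nonzero value by 2^b overflows for b > INT_MAX and
-// underflows to zero for b < INT_MIN.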
-__DEVICE__ double scalbln(double __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.0 : -0.0;
-  return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.f : -0.f;
-  return scalbnf(__a, (int)__b);
-}
-#endif
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE__ void sincos(double __a, double *__s, double *__c) {
-  return __nv_sincos(__a, __s, __c);
-}
-__DEVICE__ void sincosf(float __a, float *__s, float *__c) {
-  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE__ void sincospi(double __a, double *__s, double *__c) {
-  return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE__ void sincospif(float __a, float *__s, float *__c) {
-  return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
-  return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
-  return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
-
-#undef __NOEXCEPT
-#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__FAST_OR_SLOW")
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h b/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
deleted file mode 100644
index 4d70353..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
+++ /dev/null
@@ -1,462 +0,0 @@
-/*===-- __clang_cuda_libdevice_declares.h - decls for libdevice functions --===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_CUDA_LIBDEVICE_DECLARES_H__
-#define __CLANG_CUDA_LIBDEVICE_DECLARES_H__
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#if defined(_OPENMP)
-#define __DEVICE__
-#elif defined(__CUDA__)
-#define __DEVICE__ __device__
-#endif
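-// __DEVICE__ is defined only for OpenMP and CUDA compilations; this header
-// is not intended for plain host compiles.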
-
-__DEVICE__ int __nv_abs(int __a);
-__DEVICE__ double __nv_acos(double __a);
-__DEVICE__ float __nv_acosf(float __a);
-__DEVICE__ double __nv_acosh(double __a);
-__DEVICE__ float __nv_acoshf(float __a);
-__DEVICE__ double __nv_asin(double __a);
-__DEVICE__ float __nv_asinf(float __a);
-__DEVICE__ double __nv_asinh(double __a);
-__DEVICE__ float __nv_asinhf(float __a);
-__DEVICE__ double __nv_atan2(double __a, double __b);
-__DEVICE__ float __nv_atan2f(float __a, float __b);
-__DEVICE__ double __nv_atan(double __a);
-__DEVICE__ float __nv_atanf(float __a);
-__DEVICE__ double __nv_atanh(double __a);
-__DEVICE__ float __nv_atanhf(float __a);
-__DEVICE__ int __nv_brev(int __a);
-__DEVICE__ long long __nv_brevll(long long __a);
-__DEVICE__ int __nv_byte_perm(int __a, int __b, int __c);
-__DEVICE__ double __nv_cbrt(double __a);
-__DEVICE__ float __nv_cbrtf(float __a);
-__DEVICE__ double __nv_ceil(double __a);
-__DEVICE__ float __nv_ceilf(float __a);
-__DEVICE__ int __nv_clz(int __a);
-__DEVICE__ int __nv_clzll(long long __a);
-__DEVICE__ double __nv_copysign(double __a, double __b);
-__DEVICE__ float __nv_copysignf(float __a, float __b);
-__DEVICE__ double __nv_cos(double __a);
-__DEVICE__ float __nv_cosf(float __a);
-__DEVICE__ double __nv_cosh(double __a);
-__DEVICE__ float __nv_coshf(float __a);
-__DEVICE__ double __nv_cospi(double __a);
-__DEVICE__ float __nv_cospif(float __a);
-__DEVICE__ double __nv_cyl_bessel_i0(double __a);
-__DEVICE__ float __nv_cyl_bessel_i0f(float __a);
-__DEVICE__ double __nv_cyl_bessel_i1(double __a);
-__DEVICE__ float __nv_cyl_bessel_i1f(float __a);
-__DEVICE__ double __nv_dadd_rd(double __a, double __b);
-__DEVICE__ double __nv_dadd_rn(double __a, double __b);
-__DEVICE__ double __nv_dadd_ru(double __a, double __b);
-__DEVICE__ double __nv_dadd_rz(double __a, double __b);
-__DEVICE__ double __nv_ddiv_rd(double __a, double __b);
-__DEVICE__ double __nv_ddiv_rn(double __a, double __b);
-__DEVICE__ double __nv_ddiv_ru(double __a, double __b);
-__DEVICE__ double __nv_ddiv_rz(double __a, double __b);
-__DEVICE__ double __nv_dmul_rd(double __a, double __b);
-__DEVICE__ double __nv_dmul_rn(double __a, double __b);
-__DEVICE__ double __nv_dmul_ru(double __a, double __b);
-__DEVICE__ double __nv_dmul_rz(double __a, double __b);
-__DEVICE__ float __nv_double2float_rd(double __a);
-__DEVICE__ float __nv_double2float_rn(double __a);
-__DEVICE__ float __nv_double2float_ru(double __a);
-__DEVICE__ float __nv_double2float_rz(double __a);
-__DEVICE__ int __nv_double2hiint(double __a);
-__DEVICE__ int __nv_double2int_rd(double __a);
-__DEVICE__ int __nv_double2int_rn(double __a);
-__DEVICE__ int __nv_double2int_ru(double __a);
-__DEVICE__ int __nv_double2int_rz(double __a);
-__DEVICE__ long long __nv_double2ll_rd(double __a);
-__DEVICE__ long long __nv_double2ll_rn(double __a);
-__DEVICE__ long long __nv_double2ll_ru(double __a);
-__DEVICE__ long long __nv_double2ll_rz(double __a);
-__DEVICE__ int __nv_double2loint(double __a);
-__DEVICE__ unsigned int __nv_double2uint_rd(double __a);
-__DEVICE__ unsigned int __nv_double2uint_rn(double __a);
-__DEVICE__ unsigned int __nv_double2uint_ru(double __a);
-__DEVICE__ unsigned int __nv_double2uint_rz(double __a);
-__DEVICE__ unsigned long long __nv_double2ull_rd(double __a);
-__DEVICE__ unsigned long long __nv_double2ull_rn(double __a);
-__DEVICE__ unsigned long long __nv_double2ull_ru(double __a);
-__DEVICE__ unsigned long long __nv_double2ull_rz(double __a);
-__DEVICE__ unsigned long long __nv_double_as_longlong(double __a);
-__DEVICE__ double __nv_drcp_rd(double __a);
-__DEVICE__ double __nv_drcp_rn(double __a);
-__DEVICE__ double __nv_drcp_ru(double __a);
-__DEVICE__ double __nv_drcp_rz(double __a);
-__DEVICE__ double __nv_dsqrt_rd(double __a);
-__DEVICE__ double __nv_dsqrt_rn(double __a);
-__DEVICE__ double __nv_dsqrt_ru(double __a);
-__DEVICE__ double __nv_dsqrt_rz(double __a);
-__DEVICE__ double __nv_dsub_rd(double __a, double __b);
-__DEVICE__ double __nv_dsub_rn(double __a, double __b);
-__DEVICE__ double __nv_dsub_ru(double __a, double __b);
-__DEVICE__ double __nv_dsub_rz(double __a, double __b);
-__DEVICE__ double __nv_erfc(double __a);
-__DEVICE__ float __nv_erfcf(float __a);
-__DEVICE__ double __nv_erfcinv(double __a);
-__DEVICE__ float __nv_erfcinvf(float __a);
-__DEVICE__ double __nv_erfcx(double __a);
-__DEVICE__ float __nv_erfcxf(float __a);
-__DEVICE__ double __nv_erf(double __a);
-__DEVICE__ float __nv_erff(float __a);
-__DEVICE__ double __nv_erfinv(double __a);
-__DEVICE__ float __nv_erfinvf(float __a);
-__DEVICE__ double __nv_exp10(double __a);
-__DEVICE__ float __nv_exp10f(float __a);
-__DEVICE__ double __nv_exp2(double __a);
-__DEVICE__ float __nv_exp2f(float __a);
-__DEVICE__ double __nv_exp(double __a);
-__DEVICE__ float __nv_expf(float __a);
-__DEVICE__ double __nv_expm1(double __a);
-__DEVICE__ float __nv_expm1f(float __a);
-__DEVICE__ double __nv_fabs(double __a);
-__DEVICE__ float __nv_fabsf(float __a);
-__DEVICE__ float __nv_fadd_rd(float __a, float __b);
-__DEVICE__ float __nv_fadd_rn(float __a, float __b);
-__DEVICE__ float __nv_fadd_ru(float __a, float __b);
-__DEVICE__ float __nv_fadd_rz(float __a, float __b);
-__DEVICE__ float __nv_fast_cosf(float __a);
-__DEVICE__ float __nv_fast_exp10f(float __a);
-__DEVICE__ float __nv_fast_expf(float __a);
-__DEVICE__ float __nv_fast_fdividef(float __a, float __b);
-__DEVICE__ float __nv_fast_log10f(float __a);
-__DEVICE__ float __nv_fast_log2f(float __a);
-__DEVICE__ float __nv_fast_logf(float __a);
-__DEVICE__ float __nv_fast_powf(float __a, float __b);
-__DEVICE__ void __nv_fast_sincosf(float __a, float *__s, float *__c);
-__DEVICE__ float __nv_fast_sinf(float __a);
-__DEVICE__ float __nv_fast_tanf(float __a);
-__DEVICE__ double __nv_fdim(double __a, double __b);
-__DEVICE__ float __nv_fdimf(float __a, float __b);
-__DEVICE__ float __nv_fdiv_rd(float __a, float __b);
-__DEVICE__ float __nv_fdiv_rn(float __a, float __b);
-__DEVICE__ float __nv_fdiv_ru(float __a, float __b);
-__DEVICE__ float __nv_fdiv_rz(float __a, float __b);
-__DEVICE__ int __nv_ffs(int __a);
-__DEVICE__ int __nv_ffsll(long long __a);
-__DEVICE__ int __nv_finitef(float __a);
-__DEVICE__ unsigned short __nv_float2half_rn(float __a);
-__DEVICE__ int __nv_float2int_rd(float __a);
-__DEVICE__ int __nv_float2int_rn(float __a);
-__DEVICE__ int __nv_float2int_ru(float __a);
-__DEVICE__ int __nv_float2int_rz(float __a);
-__DEVICE__ long long __nv_float2ll_rd(float __a);
-__DEVICE__ long long __nv_float2ll_rn(float __a);
-__DEVICE__ long long __nv_float2ll_ru(float __a);
-__DEVICE__ long long __nv_float2ll_rz(float __a);
-__DEVICE__ unsigned int __nv_float2uint_rd(float __a);
-__DEVICE__ unsigned int __nv_float2uint_rn(float __a);
-__DEVICE__ unsigned int __nv_float2uint_ru(float __a);
-__DEVICE__ unsigned int __nv_float2uint_rz(float __a);
-__DEVICE__ unsigned long long __nv_float2ull_rd(float __a);
-__DEVICE__ unsigned long long __nv_float2ull_rn(float __a);
-__DEVICE__ unsigned long long __nv_float2ull_ru(float __a);
-__DEVICE__ unsigned long long __nv_float2ull_rz(float __a);
-__DEVICE__ int __nv_float_as_int(float __a);
-__DEVICE__ unsigned int __nv_float_as_uint(float __a);
-__DEVICE__ double __nv_floor(double __a);
-__DEVICE__ float __nv_floorf(float __a);
-__DEVICE__ double __nv_fma(double __a, double __b, double __c);
-__DEVICE__ float __nv_fmaf(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_ieee_rd(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_ieee_rn(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_ieee_ru(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_ieee_rz(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_rd(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_rn(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_ru(float __a, float __b, float __c);
-__DEVICE__ float __nv_fmaf_rz(float __a, float __b, float __c);
-__DEVICE__ double __nv_fma_rd(double __a, double __b, double __c);
-__DEVICE__ double __nv_fma_rn(double __a, double __b, double __c);
-__DEVICE__ double __nv_fma_ru(double __a, double __b, double __c);
-__DEVICE__ double __nv_fma_rz(double __a, double __b, double __c);
-__DEVICE__ double __nv_fmax(double __a, double __b);
-__DEVICE__ float __nv_fmaxf(float __a, float __b);
-__DEVICE__ double __nv_fmin(double __a, double __b);
-__DEVICE__ float __nv_fminf(float __a, float __b);
-__DEVICE__ double __nv_fmod(double __a, double __b);
-__DEVICE__ float __nv_fmodf(float __a, float __b);
-__DEVICE__ float __nv_fmul_rd(float __a, float __b);
-__DEVICE__ float __nv_fmul_rn(float __a, float __b);
-__DEVICE__ float __nv_fmul_ru(float __a, float __b);
-__DEVICE__ float __nv_fmul_rz(float __a, float __b);
-__DEVICE__ float __nv_frcp_rd(float __a);
-__DEVICE__ float __nv_frcp_rn(float __a);
-__DEVICE__ float __nv_frcp_ru(float __a);
-__DEVICE__ float __nv_frcp_rz(float __a);
-__DEVICE__ double __nv_frexp(double __a, int *__b);
-__DEVICE__ float __nv_frexpf(float __a, int *__b);
-__DEVICE__ float __nv_frsqrt_rn(float __a);
-__DEVICE__ float __nv_fsqrt_rd(float __a);
-__DEVICE__ float __nv_fsqrt_rn(float __a);
-__DEVICE__ float __nv_fsqrt_ru(float __a);
-__DEVICE__ float __nv_fsqrt_rz(float __a);
-__DEVICE__ float __nv_fsub_rd(float __a, float __b);
-__DEVICE__ float __nv_fsub_rn(float __a, float __b);
-__DEVICE__ float __nv_fsub_ru(float __a, float __b);
-__DEVICE__ float __nv_fsub_rz(float __a, float __b);
-__DEVICE__ int __nv_hadd(int __a, int __b);
-__DEVICE__ float __nv_half2float(unsigned short __h);
-__DEVICE__ double __nv_hiloint2double(int __a, int __b);
-__DEVICE__ double __nv_hypot(double __a, double __b);
-__DEVICE__ float __nv_hypotf(float __a, float __b);
-__DEVICE__ int __nv_ilogb(double __a);
-__DEVICE__ int __nv_ilogbf(float __a);
-__DEVICE__ double __nv_int2double_rn(int __a);
-__DEVICE__ float __nv_int2float_rd(int __a);
-__DEVICE__ float __nv_int2float_rn(int __a);
-__DEVICE__ float __nv_int2float_ru(int __a);
-__DEVICE__ float __nv_int2float_rz(int __a);
-__DEVICE__ float __nv_int_as_float(int __a);
-__DEVICE__ int __nv_isfinited(double __a);
-__DEVICE__ int __nv_isinfd(double __a);
-__DEVICE__ int __nv_isinff(float __a);
-__DEVICE__ int __nv_isnand(double __a);
-__DEVICE__ int __nv_isnanf(float __a);
-__DEVICE__ double __nv_j0(double __a);
-__DEVICE__ float __nv_j0f(float __a);
-__DEVICE__ double __nv_j1(double __a);
-__DEVICE__ float __nv_j1f(float __a);
-__DEVICE__ float __nv_jnf(int __a, float __b);
-__DEVICE__ double __nv_jn(int __a, double __b);
-__DEVICE__ double __nv_ldexp(double __a, int __b);
-__DEVICE__ float __nv_ldexpf(float __a, int __b);
-__DEVICE__ double __nv_lgamma(double __a);
-__DEVICE__ float __nv_lgammaf(float __a);
-__DEVICE__ double __nv_ll2double_rd(long long __a);
-__DEVICE__ double __nv_ll2double_rn(long long __a);
-__DEVICE__ double __nv_ll2double_ru(long long __a);
-__DEVICE__ double __nv_ll2double_rz(long long __a);
-__DEVICE__ float __nv_ll2float_rd(long long __a);
-__DEVICE__ float __nv_ll2float_rn(long long __a);
-__DEVICE__ float __nv_ll2float_ru(long long __a);
-__DEVICE__ float __nv_ll2float_rz(long long __a);
-__DEVICE__ long long __nv_llabs(long long __a);
-__DEVICE__ long long __nv_llmax(long long __a, long long __b);
-__DEVICE__ long long __nv_llmin(long long __a, long long __b);
-__DEVICE__ long long __nv_llrint(double __a);
-__DEVICE__ long long __nv_llrintf(float __a);
-__DEVICE__ long long __nv_llround(double __a);
-__DEVICE__ long long __nv_llroundf(float __a);
-__DEVICE__ double __nv_log10(double __a);
-__DEVICE__ float __nv_log10f(float __a);
-__DEVICE__ double __nv_log1p(double __a);
-__DEVICE__ float __nv_log1pf(float __a);
-__DEVICE__ double __nv_log2(double __a);
-__DEVICE__ float __nv_log2f(float __a);
-__DEVICE__ double __nv_logb(double __a);
-__DEVICE__ float __nv_logbf(float __a);
-__DEVICE__ double __nv_log(double __a);
-__DEVICE__ float __nv_logf(float __a);
-__DEVICE__ double __nv_longlong_as_double(long long __a);
-__DEVICE__ int __nv_max(int __a, int __b);
-__DEVICE__ int __nv_min(int __a, int __b);
-__DEVICE__ double __nv_modf(double __a, double *__b);
-__DEVICE__ float __nv_modff(float __a, float *__b);
-__DEVICE__ int __nv_mul24(int __a, int __b);
-__DEVICE__ long long __nv_mul64hi(long long __a, long long __b);
-__DEVICE__ int __nv_mulhi(int __a, int __b);
-__DEVICE__ double __nv_nan(const signed char *__a);
-__DEVICE__ float __nv_nanf(const signed char *__a);
-__DEVICE__ double __nv_nearbyint(double __a);
-__DEVICE__ float __nv_nearbyintf(float __a);
-__DEVICE__ double __nv_nextafter(double __a, double __b);
-__DEVICE__ float __nv_nextafterf(float __a, float __b);
-__DEVICE__ double __nv_norm3d(double __a, double __b, double __c);
-__DEVICE__ float __nv_norm3df(float __a, float __b, float __c);
-__DEVICE__ double __nv_norm4d(double __a, double __b, double __c, double __d);
-__DEVICE__ float __nv_norm4df(float __a, float __b, float __c, float __d);
-__DEVICE__ double __nv_normcdf(double __a);
-__DEVICE__ float __nv_normcdff(float __a);
-__DEVICE__ double __nv_normcdfinv(double __a);
-__DEVICE__ float __nv_normcdfinvf(float __a);
-__DEVICE__ float __nv_normf(int __a, const float *__b);
-__DEVICE__ double __nv_norm(int __a, const double *__b);
-__DEVICE__ int __nv_popc(int __a);
-__DEVICE__ int __nv_popcll(long long __a);
-__DEVICE__ double __nv_pow(double __a, double __b);
-__DEVICE__ float __nv_powf(float __a, float __b);
-__DEVICE__ double __nv_powi(double __a, int __b);
-__DEVICE__ float __nv_powif(float __a, int __b);
-__DEVICE__ double __nv_rcbrt(double __a);
-__DEVICE__ float __nv_rcbrtf(float __a);
-__DEVICE__ double __nv_rcp64h(double __a);
-__DEVICE__ double __nv_remainder(double __a, double __b);
-__DEVICE__ float __nv_remainderf(float __a, float __b);
-__DEVICE__ double __nv_remquo(double __a, double __b, int *__c);
-__DEVICE__ float __nv_remquof(float __a, float __b, int *__c);
-__DEVICE__ int __nv_rhadd(int __a, int __b);
-__DEVICE__ double __nv_rhypot(double __a, double __b);
-__DEVICE__ float __nv_rhypotf(float __a, float __b);
-__DEVICE__ double __nv_rint(double __a);
-__DEVICE__ float __nv_rintf(float __a);
-__DEVICE__ double __nv_rnorm3d(double __a, double __b, double __c);
-__DEVICE__ float __nv_rnorm3df(float __a, float __b, float __c);
-__DEVICE__ double __nv_rnorm4d(double __a, double __b, double __c, double __d);
-__DEVICE__ float __nv_rnorm4df(float __a, float __b, float __c, float __d);
-__DEVICE__ float __nv_rnormf(int __a, const float *__b);
-__DEVICE__ double __nv_rnorm(int __a, const double *__b);
-__DEVICE__ double __nv_round(double __a);
-__DEVICE__ float __nv_roundf(float __a);
-__DEVICE__ double __nv_rsqrt(double __a);
-__DEVICE__ float __nv_rsqrtf(float __a);
-__DEVICE__ int __nv_sad(int __a, int __b, int __c);
-__DEVICE__ float __nv_saturatef(float __a);
-__DEVICE__ double __nv_scalbn(double __a, int __b);
-__DEVICE__ float __nv_scalbnf(float __a, int __b);
-__DEVICE__ int __nv_signbitd(double __a);
-__DEVICE__ int __nv_signbitf(float __a);
-__DEVICE__ void __nv_sincos(double __a, double *__b, double *__c);
-__DEVICE__ void __nv_sincosf(float __a, float *__b, float *__c);
-__DEVICE__ void __nv_sincospi(double __a, double *__b, double *__c);
-__DEVICE__ void __nv_sincospif(float __a, float *__b, float *__c);
-__DEVICE__ double __nv_sin(double __a);
-__DEVICE__ float __nv_sinf(float __a);
-__DEVICE__ double __nv_sinh(double __a);
-__DEVICE__ float __nv_sinhf(float __a);
-__DEVICE__ double __nv_sinpi(double __a);
-__DEVICE__ float __nv_sinpif(float __a);
-__DEVICE__ double __nv_sqrt(double __a);
-__DEVICE__ float __nv_sqrtf(float __a);
-__DEVICE__ double __nv_tan(double __a);
-__DEVICE__ float __nv_tanf(float __a);
-__DEVICE__ double __nv_tanh(double __a);
-__DEVICE__ float __nv_tanhf(float __a);
-__DEVICE__ double __nv_tgamma(double __a);
-__DEVICE__ float __nv_tgammaf(float __a);
-__DEVICE__ double __nv_trunc(double __a);
-__DEVICE__ float __nv_truncf(float __a);
-__DEVICE__ int __nv_uhadd(unsigned int __a, unsigned int __b);
-__DEVICE__ double __nv_uint2double_rn(unsigned int __i);
-__DEVICE__ float __nv_uint2float_rd(unsigned int __a);
-__DEVICE__ float __nv_uint2float_rn(unsigned int __a);
-__DEVICE__ float __nv_uint2float_ru(unsigned int __a);
-__DEVICE__ float __nv_uint2float_rz(unsigned int __a);
-__DEVICE__ float __nv_uint_as_float(unsigned int __a);
-__DEVICE__ double __nv_ull2double_rd(unsigned long long __a);
-__DEVICE__ double __nv_ull2double_rn(unsigned long long __a);
-__DEVICE__ double __nv_ull2double_ru(unsigned long long __a);
-__DEVICE__ double __nv_ull2double_rz(unsigned long long __a);
-__DEVICE__ float __nv_ull2float_rd(unsigned long long __a);
-__DEVICE__ float __nv_ull2float_rn(unsigned long long __a);
-__DEVICE__ float __nv_ull2float_ru(unsigned long long __a);
-__DEVICE__ float __nv_ull2float_rz(unsigned long long __a);
-__DEVICE__ unsigned long long __nv_ullmax(unsigned long long __a,
-                                          unsigned long long __b);
-__DEVICE__ unsigned long long __nv_ullmin(unsigned long long __a,
-                                          unsigned long long __b);
-__DEVICE__ unsigned int __nv_umax(unsigned int __a, unsigned int __b);
-__DEVICE__ unsigned int __nv_umin(unsigned int __a, unsigned int __b);
-__DEVICE__ unsigned int __nv_umul24(unsigned int __a, unsigned int __b);
-__DEVICE__ unsigned long long __nv_umul64hi(unsigned long long __a,
-                                            unsigned long long __b);
-__DEVICE__ unsigned int __nv_umulhi(unsigned int __a, unsigned int __b);
-__DEVICE__ unsigned int __nv_urhadd(unsigned int __a, unsigned int __b);
-__DEVICE__ unsigned int __nv_usad(unsigned int __a, unsigned int __b,
-                                  unsigned int __c);
-#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020
-__DEVICE__ int __nv_vabs2(int __a);
-__DEVICE__ int __nv_vabs4(int __a);
-__DEVICE__ int __nv_vabsdiffs2(int __a, int __b);
-__DEVICE__ int __nv_vabsdiffs4(int __a, int __b);
-__DEVICE__ int __nv_vabsdiffu2(int __a, int __b);
-__DEVICE__ int __nv_vabsdiffu4(int __a, int __b);
-__DEVICE__ int __nv_vabsss2(int __a);
-__DEVICE__ int __nv_vabsss4(int __a);
-__DEVICE__ int __nv_vadd2(int __a, int __b);
-__DEVICE__ int __nv_vadd4(int __a, int __b);
-__DEVICE__ int __nv_vaddss2(int __a, int __b);
-__DEVICE__ int __nv_vaddss4(int __a, int __b);
-__DEVICE__ int __nv_vaddus2(int __a, int __b);
-__DEVICE__ int __nv_vaddus4(int __a, int __b);
-__DEVICE__ int __nv_vavgs2(int __a, int __b);
-__DEVICE__ int __nv_vavgs4(int __a, int __b);
-__DEVICE__ int __nv_vavgu2(int __a, int __b);
-__DEVICE__ int __nv_vavgu4(int __a, int __b);
-__DEVICE__ int __nv_vcmpeq2(int __a, int __b);
-__DEVICE__ int __nv_vcmpeq4(int __a, int __b);
-__DEVICE__ int __nv_vcmpges2(int __a, int __b);
-__DEVICE__ int __nv_vcmpges4(int __a, int __b);
-__DEVICE__ int __nv_vcmpgeu2(int __a, int __b);
-__DEVICE__ int __nv_vcmpgeu4(int __a, int __b);
-__DEVICE__ int __nv_vcmpgts2(int __a, int __b);
-__DEVICE__ int __nv_vcmpgts4(int __a, int __b);
-__DEVICE__ int __nv_vcmpgtu2(int __a, int __b);
-__DEVICE__ int __nv_vcmpgtu4(int __a, int __b);
-__DEVICE__ int __nv_vcmples2(int __a, int __b);
-__DEVICE__ int __nv_vcmples4(int __a, int __b);
-__DEVICE__ int __nv_vcmpleu2(int __a, int __b);
-__DEVICE__ int __nv_vcmpleu4(int __a, int __b);
-__DEVICE__ int __nv_vcmplts2(int __a, int __b);
-__DEVICE__ int __nv_vcmplts4(int __a, int __b);
-__DEVICE__ int __nv_vcmpltu2(int __a, int __b);
-__DEVICE__ int __nv_vcmpltu4(int __a, int __b);
-__DEVICE__ int __nv_vcmpne2(int __a, int __b);
-__DEVICE__ int __nv_vcmpne4(int __a, int __b);
-__DEVICE__ int __nv_vhaddu2(int __a, int __b);
-__DEVICE__ int __nv_vhaddu4(int __a, int __b);
-__DEVICE__ int __nv_vmaxs2(int __a, int __b);
-__DEVICE__ int __nv_vmaxs4(int __a, int __b);
-__DEVICE__ int __nv_vmaxu2(int __a, int __b);
-__DEVICE__ int __nv_vmaxu4(int __a, int __b);
-__DEVICE__ int __nv_vmins2(int __a, int __b);
-__DEVICE__ int __nv_vmins4(int __a, int __b);
-__DEVICE__ int __nv_vminu2(int __a, int __b);
-__DEVICE__ int __nv_vminu4(int __a, int __b);
-__DEVICE__ int __nv_vneg2(int __a);
-__DEVICE__ int __nv_vneg4(int __a);
-__DEVICE__ int __nv_vnegss2(int __a);
-__DEVICE__ int __nv_vnegss4(int __a);
-__DEVICE__ int __nv_vsads2(int __a, int __b);
-__DEVICE__ int __nv_vsads4(int __a, int __b);
-__DEVICE__ int __nv_vsadu2(int __a, int __b);
-__DEVICE__ int __nv_vsadu4(int __a, int __b);
-__DEVICE__ int __nv_vseteq2(int __a, int __b);
-__DEVICE__ int __nv_vseteq4(int __a, int __b);
-__DEVICE__ int __nv_vsetges2(int __a, int __b);
-__DEVICE__ int __nv_vsetges4(int __a, int __b);
-__DEVICE__ int __nv_vsetgeu2(int __a, int __b);
-__DEVICE__ int __nv_vsetgeu4(int __a, int __b);
-__DEVICE__ int __nv_vsetgts2(int __a, int __b);
-__DEVICE__ int __nv_vsetgts4(int __a, int __b);
-__DEVICE__ int __nv_vsetgtu2(int __a, int __b);
-__DEVICE__ int __nv_vsetgtu4(int __a, int __b);
-__DEVICE__ int __nv_vsetles2(int __a, int __b);
-__DEVICE__ int __nv_vsetles4(int __a, int __b);
-__DEVICE__ int __nv_vsetleu2(int __a, int __b);
-__DEVICE__ int __nv_vsetleu4(int __a, int __b);
-__DEVICE__ int __nv_vsetlts2(int __a, int __b);
-__DEVICE__ int __nv_vsetlts4(int __a, int __b);
-__DEVICE__ int __nv_vsetltu2(int __a, int __b);
-__DEVICE__ int __nv_vsetltu4(int __a, int __b);
-__DEVICE__ int __nv_vsetne2(int __a, int __b);
-__DEVICE__ int __nv_vsetne4(int __a, int __b);
-__DEVICE__ int __nv_vsub2(int __a, int __b);
-__DEVICE__ int __nv_vsub4(int __a, int __b);
-__DEVICE__ int __nv_vsubss2(int __a, int __b);
-__DEVICE__ int __nv_vsubss4(int __a, int __b);
-__DEVICE__ int __nv_vsubus2(int __a, int __b);
-__DEVICE__ int __nv_vsubus4(int __a, int __b);
-#endif  // CUDA_VERSION
-__DEVICE__ double __nv_y0(double __a);
-__DEVICE__ float __nv_y0f(float __a);
-__DEVICE__ double __nv_y1(double __a);
-__DEVICE__ float __nv_y1f(float __a);
-__DEVICE__ float __nv_ynf(int __a, float __b);
-__DEVICE__ double __nv_yn(int __a, double __b);
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-#endif // __CLANG_CUDA_LIBDEVICE_DECLARES_H__
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h b/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
deleted file mode 100644
index 0afe4db..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/*===- __clang_cuda_math_forward_declares.h - Prototypes of __device__ fns -===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
-#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-// This file forward-declares some math functions we (or the CUDA headers)
-// will define later.  We need to do this, and do it before cmath is included,
-// because the standard library may have constexpr math functions.  In the
-// absence of a prior __device__ decl, those constexpr functions may become
-// implicitly host+device.  Host+device functions can't be overloaded, so that
-// would preclude the use of our own __device__ overloads for these functions.
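-//
-// A minimal sketch of the failure mode (illustrative, not from any real
-// header): suppose a standard library header declares
-//
-//   constexpr float sin(float __x);   // no __device__ declaration seen yet
-//
-// During CUDA compilation such a constexpr function may be treated as
-// implicitly host+device, and since host+device functions can't be
-// overloaded, the __device__ float sin(float) declared below could then
-// never be added.  Declaring the __device__ overloads first avoids this.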
-
-#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __inline__ __attribute__((always_inline))
-#else
-#define __DEVICE__                                                             \
-  static __inline__ __attribute__((always_inline)) __attribute__((device))
-#endif
-
-// For C++17 we need to include the noexcept attribute to be compatible
-// with the header-defined version. This may be removed once OpenMP
-// 'declare variant' is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
-__DEVICE__ long abs(long);
-__DEVICE__ long long abs(long long);
-__DEVICE__ double abs(double);
-__DEVICE__ float abs(float);
-#endif
-// While providing the CUDA declarations and definitions for math functions,
-// we may manually define additional functions.
-// TODO: Once OpenMP 'declare variant' is supported, the additional
-// functions will have to be removed.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const double abs(const double);
-__DEVICE__ const float abs(const float);
-#endif
-__DEVICE__ int abs(int) __NOEXCEPT;
-__DEVICE__ double acos(double);
-__DEVICE__ float acos(float);
-__DEVICE__ double acosh(double);
-__DEVICE__ float acosh(float);
-__DEVICE__ double asin(double);
-__DEVICE__ float asin(float);
-__DEVICE__ double asinh(double);
-__DEVICE__ float asinh(float);
-__DEVICE__ double atan2(double, double);
-__DEVICE__ float atan2(float, float);
-__DEVICE__ double atan(double);
-__DEVICE__ float atan(float);
-__DEVICE__ double atanh(double);
-__DEVICE__ float atanh(float);
-__DEVICE__ double cbrt(double);
-__DEVICE__ float cbrt(float);
-__DEVICE__ double ceil(double);
-__DEVICE__ float ceil(float);
-__DEVICE__ double copysign(double, double);
-__DEVICE__ float copysign(float, float);
-__DEVICE__ double cos(double);
-__DEVICE__ float cos(float);
-__DEVICE__ double cosh(double);
-__DEVICE__ float cosh(float);
-__DEVICE__ double erfc(double);
-__DEVICE__ float erfc(float);
-__DEVICE__ double erf(double);
-__DEVICE__ float erf(float);
-__DEVICE__ double exp2(double);
-__DEVICE__ float exp2(float);
-__DEVICE__ double exp(double);
-__DEVICE__ float exp(float);
-__DEVICE__ double expm1(double);
-__DEVICE__ float expm1(float);
-__DEVICE__ double fabs(double) __NOEXCEPT;
-__DEVICE__ float fabs(float) __NOEXCEPT;
-__DEVICE__ double fdim(double, double);
-__DEVICE__ float fdim(float, float);
-__DEVICE__ double floor(double);
-__DEVICE__ float floor(float);
-__DEVICE__ double fma(double, double, double);
-__DEVICE__ float fma(float, float, float);
-__DEVICE__ double fmax(double, double);
-__DEVICE__ float fmax(float, float);
-__DEVICE__ double fmin(double, double);
-__DEVICE__ float fmin(float, float);
-__DEVICE__ double fmod(double, double);
-__DEVICE__ float fmod(float, float);
-__DEVICE__ int fpclassify(double);
-__DEVICE__ int fpclassify(float);
-__DEVICE__ double frexp(double, int *);
-__DEVICE__ float frexp(float, int *);
-__DEVICE__ double hypot(double, double);
-__DEVICE__ float hypot(float, float);
-__DEVICE__ int ilogb(double);
-__DEVICE__ int ilogb(float);
-#ifdef _MSC_VER
-__DEVICE__ bool isfinite(long double);
-#endif
-__DEVICE__ bool isfinite(double);
-__DEVICE__ bool isfinite(float);
-__DEVICE__ bool isgreater(double, double);
-__DEVICE__ bool isgreaterequal(double, double);
-__DEVICE__ bool isgreaterequal(float, float);
-__DEVICE__ bool isgreater(float, float);
-#ifdef _MSC_VER
-__DEVICE__ bool isinf(long double);
-#endif
-__DEVICE__ bool isinf(double);
-__DEVICE__ bool isinf(float);
-__DEVICE__ bool isless(double, double);
-__DEVICE__ bool islessequal(double, double);
-__DEVICE__ bool islessequal(float, float);
-__DEVICE__ bool isless(float, float);
-__DEVICE__ bool islessgreater(double, double);
-__DEVICE__ bool islessgreater(float, float);
-#ifdef _MSC_VER
-__DEVICE__ bool isnan(long double);
-#endif
-__DEVICE__ bool isnan(double);
-__DEVICE__ bool isnan(float);
-__DEVICE__ bool isnormal(double);
-__DEVICE__ bool isnormal(float);
-__DEVICE__ bool isunordered(double, double);
-__DEVICE__ bool isunordered(float, float);
-__DEVICE__ long labs(long) __NOEXCEPT;
-__DEVICE__ double ldexp(double, int);
-__DEVICE__ float ldexp(float, int);
-__DEVICE__ double lgamma(double);
-__DEVICE__ float lgamma(float);
-__DEVICE__ long long llabs(long long) __NOEXCEPT;
-__DEVICE__ long long llrint(double);
-__DEVICE__ long long llrint(float);
-__DEVICE__ double log10(double);
-__DEVICE__ float log10(float);
-__DEVICE__ double log1p(double);
-__DEVICE__ float log1p(float);
-__DEVICE__ double log2(double);
-__DEVICE__ float log2(float);
-__DEVICE__ double logb(double);
-__DEVICE__ float logb(float);
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ long double log(long double);
-#endif
-__DEVICE__ double log(double);
-__DEVICE__ float log(float);
-__DEVICE__ long lrint(double);
-__DEVICE__ long lrint(float);
-__DEVICE__ long lround(double);
-__DEVICE__ long lround(float);
-__DEVICE__ long long llround(float); // No llround(double).
-__DEVICE__ double modf(double, double *);
-__DEVICE__ float modf(float, float *);
-__DEVICE__ double nan(const char *);
-__DEVICE__ float nanf(const char *);
-__DEVICE__ double nearbyint(double);
-__DEVICE__ float nearbyint(float);
-__DEVICE__ double nextafter(double, double);
-__DEVICE__ float nextafter(float, float);
-__DEVICE__ double pow(double, double);
-__DEVICE__ double pow(double, int);
-__DEVICE__ float pow(float, float);
-__DEVICE__ float pow(float, int);
-__DEVICE__ double remainder(double, double);
-__DEVICE__ float remainder(float, float);
-__DEVICE__ double remquo(double, double, int *);
-__DEVICE__ float remquo(float, float, int *);
-__DEVICE__ double rint(double);
-__DEVICE__ float rint(float);
-__DEVICE__ double round(double);
-__DEVICE__ float round(float);
-__DEVICE__ double scalbln(double, long);
-__DEVICE__ float scalbln(float, long);
-__DEVICE__ double scalbn(double, int);
-__DEVICE__ float scalbn(float, int);
-__DEVICE__ bool signbit(double);
-__DEVICE__ bool signbit(float);
-__DEVICE__ double sin(double);
-__DEVICE__ float sin(float);
-__DEVICE__ double sinh(double);
-__DEVICE__ float sinh(float);
-__DEVICE__ double sqrt(double);
-__DEVICE__ float sqrt(float);
-__DEVICE__ double tan(double);
-__DEVICE__ float tan(float);
-__DEVICE__ double tanh(double);
-__DEVICE__ float tanh(float);
-__DEVICE__ double tgamma(double);
-__DEVICE__ float tgamma(float);
-__DEVICE__ double trunc(double);
-__DEVICE__ float trunc(float);
-
-// Notably missing above is nexttoward, which we don't define on
-// the device side because libdevice doesn't give us an implementation, and we
-// don't want to be in the business of writing one ourselves.
-
-// We need to define these overloads in exactly the namespace our standard
-// library uses (including the right inline namespace), otherwise they won't be
-// picked up by other functions in the standard library (e.g. functions in
-// <complex>).  Thus the ugliness below.
-#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
-_LIBCPP_BEGIN_NAMESPACE_STD
-#else
-namespace std {
-#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
-#endif
-#endif
-
-using ::abs;
-using ::acos;
-using ::acosh;
-using ::asin;
-using ::asinh;
-using ::atan;
-using ::atan2;
-using ::atanh;
-using ::cbrt;
-using ::ceil;
-using ::copysign;
-using ::cos;
-using ::cosh;
-using ::erf;
-using ::erfc;
-using ::exp;
-using ::exp2;
-using ::expm1;
-using ::fabs;
-using ::fdim;
-using ::floor;
-using ::fma;
-using ::fmax;
-using ::fmin;
-using ::fmod;
-using ::fpclassify;
-using ::frexp;
-using ::hypot;
-using ::ilogb;
-using ::isfinite;
-using ::isgreater;
-using ::isgreaterequal;
-using ::isinf;
-using ::isless;
-using ::islessequal;
-using ::islessgreater;
-using ::isnan;
-using ::isnormal;
-using ::isunordered;
-using ::labs;
-using ::ldexp;
-using ::lgamma;
-using ::llabs;
-using ::llrint;
-using ::log;
-using ::log10;
-using ::log1p;
-using ::log2;
-using ::logb;
-using ::lrint;
-using ::lround;
-using ::llround;
-using ::modf;
-using ::nan;
-using ::nanf;
-using ::nearbyint;
-using ::nextafter;
-using ::pow;
-using ::remainder;
-using ::remquo;
-using ::rint;
-using ::round;
-using ::scalbln;
-using ::scalbn;
-using ::signbit;
-using ::sin;
-using ::sinh;
-using ::sqrt;
-using ::tan;
-using ::tanh;
-using ::tgamma;
-using ::trunc;
-
-#ifdef _LIBCPP_END_NAMESPACE_STD
-_LIBCPP_END_NAMESPACE_STD
-#else
-#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
-_GLIBCXX_END_NAMESPACE_VERSION
-#endif
-} // namespace std
-#endif
-
-#undef __NOEXCEPT
-#pragma pop_macro("__DEVICE__")
-
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h b/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
deleted file mode 100644
index e91de3c..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
+++ /dev/null
@@ -1,426 +0,0 @@
-/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/*
- * WARNING: This header is intended to be directly -include'd by
- * the compiler and is not supposed to be included by users.
- *
- * CUDA headers are implemented in a way that currently makes it
- * impossible for user code to #include them directly when compiling with
- * Clang. They present a different view of CUDA-supplied functions
- * depending on where in NVCC's compilation pipeline the headers are
- * included. Neither of these modes provides function definitions with
- * the correct attributes, so we use the preprocessor to force the headers
- * into a form that Clang can use.
- *
- * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's
- * this file during every CUDA compilation.
- */
-
-#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
-#define __CLANG_CUDA_RUNTIME_WRAPPER_H__
-
-#if defined(__CUDA__) && defined(__clang__)
-
-// Include some forward declares that must come before cmath.
-#include <__clang_cuda_math_forward_declares.h>
-
-// Include some standard headers to avoid CUDA headers including them
-// while some required macros (like __THROW) are in a weird state.
-#include <cmath>
-#include <cstdlib>
-#include <stdlib.h>
-
-// Preserve common macros that will be changed below by us or by CUDA
-// headers.
-#pragma push_macro("__THROW")
-#pragma push_macro("__CUDA_ARCH__")
-
-// WARNING: Preprocessor hacks below are based on specific details of
-// CUDA-7.x headers and are not expected to work with any other
-// version of CUDA headers.
-#include "cuda.h"
-#if !defined(CUDA_VERSION)
-#error "cuda.h did not define CUDA_VERSION"
-#elif CUDA_VERSION < 7000
-#error "Unsupported CUDA version!"
-#endif
-
-#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
-#if CUDA_VERSION >= 10000
-#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
-#endif
-
-// Make the largest subset of device functions available during host
-// compilation -- SM_35 for the time being.
-#ifndef __CUDA_ARCH__
-#define __CUDA_ARCH__ 350
-#endif
-
-#include "__clang_cuda_builtin_vars.h"
-
-// No need for device_launch_parameters.h as __clang_cuda_builtin_vars.h above
-// has taken care of builtin variables declared in the file.
-#define __DEVICE_LAUNCH_PARAMETERS_H__
-
-// {math,device}_functions.h only have declarations of the
-// functions. We don't need them as we're going to pull in their
-// definitions from .hpp files.
-#define __DEVICE_FUNCTIONS_H__
-#define __MATH_FUNCTIONS_H__
-#define __COMMON_FUNCTIONS_H__
-// device_functions_decls is replaced by __clang_cuda_device_functions.h
-// included below.
-#define __DEVICE_FUNCTIONS_DECLS_H__
-
-#undef __CUDACC__
-#if CUDA_VERSION < 9000
-#define __CUDABE__
-#else
-#define __CUDA_LIBDEVICE__
-#endif
-// Disables definitions of device-side runtime support stubs in
-// cuda_device_runtime_api.h
-#include "driver_types.h"
-#include "host_config.h"
-#include "host_defines.h"
-
-// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
-// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
-// functional equivalent of what we need.
-#pragma push_macro("nv_weak")
-#define nv_weak weak
-#undef __CUDABE__
-#undef __CUDA_LIBDEVICE__
-#define __CUDACC__
-#include "cuda_runtime.h"
-
-#pragma pop_macro("nv_weak")
-#undef __CUDACC__
-#define __CUDABE__
-
-// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
-// not have at the moment. Emulate them with a builtin memcpy/memset.
-#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
-#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
-
-#if CUDA_VERSION < 9000
-#include "crt/device_runtime.h"
-#endif
-#include "crt/host_runtime.h"
-// device_runtime.h defines __cxa_* macros that will conflict with
-// cxxabi.h.
-// FIXME: redefine these as __device__ functions.
-#undef __cxa_vec_ctor
-#undef __cxa_vec_cctor
-#undef __cxa_vec_dtor
-#undef __cxa_vec_new
-#undef __cxa_vec_new2
-#undef __cxa_vec_new3
-#undef __cxa_vec_delete2
-#undef __cxa_vec_delete
-#undef __cxa_vec_delete3
-#undef __cxa_pure_virtual
-
-// math_functions.hpp expects this host function to be defined on macOS, but it
-// ends up not being there because of the games we play here.  Just define it
-// ourselves; it's simple enough.
-#ifdef __APPLE__
-inline __host__ double __signbitd(double x) {
-  return std::signbit(x);
-}
-#endif
-
-// CUDA 9.1 no longer provides declarations for libdevice functions, so we need
-// to provide our own.
-#include <__clang_cuda_libdevice_declares.h>
-
-// Wrappers for many device-side standard library functions became compiler
-// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
-// provides its own implementation of the wrappers.
-#if CUDA_VERSION >= 9000
-#include <__clang_cuda_device_functions.h>
-#endif
-
-// __THROW is redefined to be empty by device_functions_decls.h in CUDA.
-// Clang's counterpart does not do this, so we need to make it empty here to
-// keep the following CUDA includes happy.
-#undef __THROW
-#define __THROW
-
-// CUDA 8.0.41 relies on the values of __USE_FAST_MATH__ and __CUDA_PREC_DIV.
-// Previous versions used to check whether they are defined or not.
-// The CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it
-// here to detect the switch.
-
-#if defined(CU_DEVICE_INVALID)
-#if !defined(__USE_FAST_MATH__)
-#define __USE_FAST_MATH__ 0
-#endif
-
-#if !defined(__CUDA_PREC_DIV)
-#define __CUDA_PREC_DIV 0
-#endif
-#endif
-
-// Temporarily poison __host__ macro to ensure it's not used by any of
-// the headers we're about to include.
-#pragma push_macro("__host__")
-#define __host__ UNEXPECTED_HOST_ATTRIBUTE
-
-// device_functions.hpp and math_functions*.hpp use 'static
-// __forceinline__' (with no __device__) for definitions of device
-// functions. Temporarily redefine __forceinline__ to include
-// __device__.
-#pragma push_macro("__forceinline__")
-#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
-#if CUDA_VERSION < 9000
-#include "device_functions.hpp"
-#endif
-
-// math_functions.hpp uses the __USE_FAST_MATH__ macro to determine whether we
-// get the slow-but-accurate or fast-but-inaccurate versions of functions like
-// sin and exp.  This is controlled in clang by -fcuda-approx-transcendentals.
-//
-// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
-// slow divides), so we need to scope our define carefully here.
-#pragma push_macro("__USE_FAST_MATH__")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __USE_FAST_MATH__ 1
-#endif
-
-#if CUDA_VERSION >= 9000
-// CUDA-9.2 needs host-side memcpy for some host functions in
-// device_functions.hpp
-#if CUDA_VERSION >= 9020
-#include <string.h>
-#endif
-#include "crt/math_functions.hpp"
-#else
-#include "math_functions.hpp"
-#endif
-
-#pragma pop_macro("__USE_FAST_MATH__")
-
-#if CUDA_VERSION < 9000
-#include "math_functions_dbl_ptx3.hpp"
-#endif
-#pragma pop_macro("__forceinline__")
-
-// Pull in host-only functions that are only available when neither
-// __CUDACC__ nor __CUDABE__ is defined.
-#undef __MATH_FUNCTIONS_HPP__
-#undef __CUDABE__
-#if CUDA_VERSION < 9000
-#include "math_functions.hpp"
-#endif
-// Alas, additional overloads for these functions are hard to get to.
-// Considering that we only need these overloads for a few functions,
-// we can provide them here.
-static inline float rsqrt(float __a) { return rsqrtf(__a); }
-static inline float rcbrt(float __a) { return rcbrtf(__a); }
-static inline float sinpi(float __a) { return sinpif(__a); }
-static inline float cospi(float __a) { return cospif(__a); }
-static inline void sincospi(float __a, float *__b, float *__c) {
-  return sincospif(__a, __b, __c);
-}
-static inline float erfcinv(float __a) { return erfcinvf(__a); }
-static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
-static inline float normcdf(float __a) { return normcdff(__a); }
-static inline float erfcx(float __a) { return erfcxf(__a); }
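-
-// Illustrative use of the wrappers above (a sketch; the surrounding device
-// code and the variables __x/__y are assumed, not part of this header): they
-// let float-typed code call the generic names directly, e.g.
-//   float r = rsqrt(__x) + sinpi(__y);  // dispatches to rsqrtf()/sinpif()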
-
-#if CUDA_VERSION < 9000
-// For some reason the single-argument variant is not always declared by the
-// CUDA headers. Alas, device_functions.hpp included below needs it.
-static inline __device__ void __brkpt(int __c) { __brkpt(); }
-#endif
-
-// Now include *.hpp with definitions of various GPU functions.  Alas,
-// a lot of things get declared/defined with the __host__ attribute, which
-// we don't want, so we have to define it out. We also have to include
-// {device,math}_functions.hpp again in order to extract the other
-// branch of #if/else inside.
-#define __host__
-#undef __CUDABE__
-#define __CUDACC__
-#if CUDA_VERSION >= 9000
-// Some atomic functions became compiler builtins in CUDA-9, so we need their
-// declarations.
-#include "device_atomic_functions.h"
-#endif
-#undef __DEVICE_FUNCTIONS_HPP__
-#include "device_atomic_functions.hpp"
-#if CUDA_VERSION >= 9000
-#include "crt/device_functions.hpp"
-#include "crt/device_double_functions.hpp"
-#else
-#include "device_functions.hpp"
-#define __CUDABE__
-#include "device_double_functions.h"
-#undef __CUDABE__
-#endif
-#include "sm_20_atomic_functions.hpp"
-#include "sm_20_intrinsics.hpp"
-#include "sm_32_atomic_functions.hpp"
-
-// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h.  These define the
-// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to
-// define them using builtins so that the optimizer can reason about and across
-// these instructions.  In particular, using builtins for ldg gets us the
-// [addr+imm] addressing mode, which, although it doesn't actually exist in the
-// hardware, seems to generate faster machine code because ptxas can more easily
-// reason about our code.
-
-#if CUDA_VERSION >= 8000
-#pragma push_macro("__CUDA_ARCH__")
-#undef __CUDA_ARCH__
-#include "sm_60_atomic_functions.hpp"
-#include "sm_61_intrinsics.hpp"
-#pragma pop_macro("__CUDA_ARCH__")
-#endif
-
-#undef __MATH_FUNCTIONS_HPP__
-
-// math_functions.hpp defines ::signbit as a __host__ __device__ function.  This
-// conflicts with libstdc++'s constexpr ::signbit, so we have to rename
-// math_functions.hpp's ::signbit.  It's guarded by #undef signbit, but that's
-// conditional on __GNUC__.  :)
-#pragma push_macro("signbit")
-#pragma push_macro("__GNUC__")
-#undef __GNUC__
-#define signbit __ignored_cuda_signbit
-
-// CUDA-9 omits device-side definitions of some math functions if it sees the
-// include guard from libstdc++'s math.h wrapper. We have to undo the header
-// guard temporarily to get the definitions we need.
-#pragma push_macro("_GLIBCXX_MATH_H")
-#pragma push_macro("_LIBCPP_VERSION")
-#if CUDA_VERSION >= 9000
-#undef _GLIBCXX_MATH_H
-// We also need to undo another guard that checks for libc++ 3.8+
-#ifdef _LIBCPP_VERSION
-#define _LIBCPP_VERSION 3700
-#endif
-#endif
-
-#if CUDA_VERSION >= 9000
-#include "crt/math_functions.hpp"
-#else
-#include "math_functions.hpp"
-#endif
-#pragma pop_macro("_GLIBCXX_MATH_H")
-#pragma pop_macro("_LIBCPP_VERSION")
-#pragma pop_macro("__GNUC__")
-#pragma pop_macro("signbit")
-
-#pragma pop_macro("__host__")
-
-#include "texture_indirect_functions.h"
-
-// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
-#pragma pop_macro("__CUDA_ARCH__")
-#pragma pop_macro("__THROW")
-
-// Set up compiler macros expected to be seen during compilation.
-#undef __CUDABE__
-#define __CUDACC__
-
-extern "C" {
-// Device-side CUDA system calls.
-// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls
-// We need these declarations and wrappers for device-side
-// malloc/free/printf calls to work without relying on the
-// -fcuda-disable-target-call-checks option.
-__device__ int vprintf(const char *, const char *);
-__device__ void free(void *) __attribute((nothrow));
-__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));
-__device__ void __assertfail(const char *__message, const char *__file,
-                             unsigned __line, const char *__function,
-                             size_t __charSize) __attribute__((noreturn));
-
-// In order for the standard assert() macro on Linux to work, we need to
-// provide a device-side __assert_fail().
-__device__ static inline void __assert_fail(const char *__message,
-                                            const char *__file, unsigned __line,
-                                            const char *__function) {
-  __assertfail(__message, __file, __line, __function, sizeof(char));
-}
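-
-// Illustrative expansion (a rough sketch of glibc's assert macro, not
-// something this header emits): with the definition above, device code like
-//   assert(idx < n);
-// becomes, roughly,
-//   (idx < n) ? (void)0 : __assert_fail("idx < n", __FILE__, __LINE__, __func__);
-// which forwards to the __assertfail system call declared above.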
-
-// Clang will convert printf into vprintf, but we still need a
-// device-side declaration for it.
-__device__ int printf(const char *, ...);
-} // extern "C"
-
-// We also need device-side std::malloc and std::free.
-namespace std {
-__device__ static inline void free(void *__ptr) { ::free(__ptr); }
-__device__ static inline void *malloc(size_t __size) {
-  return ::malloc(__size);
-}
-} // namespace std
-
-// Out-of-line implementations from __clang_cuda_builtin_vars.h.  These need to
-// come after we've pulled in the definitions of uint3 and dim3.
-
-__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
-  uint3 ret;
-  ret.x = x;
-  ret.y = y;
-  ret.z = z;
-  return ret;
-}
-
-__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
-  uint3 ret;
-  ret.x = x;
-  ret.y = y;
-  ret.z = z;
-  return ret;
-}
-
-__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
-  return dim3(x, y, z);
-}
-
-__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
-  return dim3(x, y, z);
-}
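-
-// Illustrative use of the conversions above (a sketch; flat_index is a
-// hypothetical helper, not part of this header):
-//   __device__ unsigned flat_index() {
-//     uint3 t = threadIdx;  // uses __cuda_builtin_threadIdx_t::operator uint3()
-//     dim3 b = blockDim;    // uses __cuda_builtin_blockDim_t::operator dim3()
-//     return t.x + b.x * (t.y + b.y * t.z);
-//   }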
-
-#include <__clang_cuda_cmath.h>
-#include <__clang_cuda_intrinsics.h>
-#include <__clang_cuda_complex_builtins.h>
-
-// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
-// mode, giving them their "proper" types of dim3 and uint3.  This is
-// incompatible with the types we give in __clang_cuda_builtin_vars.h.  As a
-// hack, force-include the header (nvcc doesn't include it by default) but
-// redefine dim3 and uint3 to our builtin types.  (Thankfully dim3 and uint3 are
-// only used here for the redeclarations of blockDim and threadIdx.)
-#pragma push_macro("dim3")
-#pragma push_macro("uint3")
-#define dim3 __cuda_builtin_blockDim_t
-#define uint3 __cuda_builtin_threadIdx_t
-#include "curand_mtgp32_kernel.h"
-#pragma pop_macro("dim3")
-#pragma pop_macro("uint3")
-#pragma pop_macro("__USE_FAST_MATH__")
-#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
-
-// The CUDA runtime uses this undocumented function to access kernel launch
-// configuration. The declaration is in crt/device_functions.h, but that file
-// includes a lot of other stuff we don't want. Instead, we'll provide our own
-// declaration for it here.
-#if CUDA_VERSION >= 9020
-extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
-                                                size_t sharedMem = 0,
-                                                void *stream = 0);
-#endif
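-
-// Sketch of how the declaration above is used (an assumption about the
-// lowering, not a verbatim transcript): a launch such as
-//   kernel<<<grid, block, shmem, stream>>>(args);
-// is compiled into a call to __cudaPushCallConfiguration(grid, block, shmem,
-// stream) followed by a call to the kernel's host-side stub, which pops the
-// configuration and performs the actual launch.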
-
-#endif // __CUDA__
-#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
diff --git a/linux-x86/lib64/clang/11.0.1/include/altivec.h b/linux-x86/lib64/clang/11.0.1/include/altivec.h
deleted file mode 100644
index 7e231a2..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/altivec.h
+++ /dev/null
@@ -1,16766 +0,0 @@
-/*===---- altivec.h - Standard header for AltiVec/VSX intrinsics ----------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __ALTIVEC_H
-#define __ALTIVEC_H
-
-#ifndef __ALTIVEC__
-#error "AltiVec support not enabled"
-#endif
-
-/* Constants for mapping CR6 bits to predicate result. */
-
-#define __CR6_EQ 0
-#define __CR6_EQ_REV 1
-#define __CR6_LT 2
-#define __CR6_LT_REV 3
-
-/* Constants for vec_test_data_class */
-#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
-#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1)
-#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
-                                  __VEC_CLASS_FP_SUBNORMAL_N)
-#define __VEC_CLASS_FP_ZERO_N (1<<2)
-#define __VEC_CLASS_FP_ZERO_P (1<<3)
-#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P           | \
-                             __VEC_CLASS_FP_ZERO_N)
-#define __VEC_CLASS_FP_INFINITY_N (1<<4)
-#define __VEC_CLASS_FP_INFINITY_P (1<<5)
-#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P   | \
-                                 __VEC_CLASS_FP_INFINITY_N)
-#define __VEC_CLASS_FP_NAN (1<<6)
-#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN        | \
-                                   __VEC_CLASS_FP_SUBNORMAL  | \
-                                   __VEC_CLASS_FP_ZERO       | \
-                                   __VEC_CLASS_FP_INFINITY)
-
-#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-
-#ifdef __POWER9_VECTOR__
-#include <stddef.h>
-#endif
-
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c);
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c);
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_perm(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c);
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a,
-                                                      vector double __b,
-                                                      vector unsigned char __c);
-#endif
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b);
-
-/* vec_abs */
-
-#define __builtin_altivec_abs_v16qi vec_abs
-#define __builtin_altivec_abs_v8hi vec_abs
-#define __builtin_altivec_abs_v4si vec_abs
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abs(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(__a, -__a);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abs(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(__a, -__a);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abs(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(__a, -__a);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_abs(vector signed long long __a) {
-  return __builtin_altivec_vmaxsd(__a, -__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvabssp(__a);
-#else
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
-  return (vector float)__res;
-#endif
-}
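-
-/* Bit-level illustration of the non-VSX fallback above: clearing the sign
-   bit with 0x7FFFFFFF maps, e.g., -1.0f (0xBF800000) to 1.0f (0x3F800000),
-   yielding |x| for every IEEE-754 input, including -0.0f and -INFINITY. */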
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
-  return __builtin_vsx_xvabsdp(__a);
-}
-#endif
-
-/* vec_abss */
-#define __builtin_altivec_abss_v16qi vec_abss
-#define __builtin_altivec_abss_v8hi vec_abss
-#define __builtin_altivec_abss_v4si vec_abss
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abss(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(
-      __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abss(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(
-      __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abss(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(
-      __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
-}
-
-/* vec_absd */
-#if defined(__POWER9_VECTOR__)
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_absd(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vabsdub(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_absd(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vabsduh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_absd(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vabsduw(__a, __b);
-}
-
-#endif /* End __POWER9_VECTOR__ */
-
-/* vec_add */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_add(vector signed long long __a, vector signed long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_add(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_add(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
-                                                    vector float __b) {
-  return __a + __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
-                                                     vector double __b) {
-  return __a + __b;
-}
-#endif // __VSX__
-
-/* vec_adde */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_adde(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_adde(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adde(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
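-
-/* Worked example for the generic vec_adde implementations above
-   (illustrative): with __a = {0xFFFFFFFF, 1, 0, 0}, __b = {0, 1, 0, 0} and
-   __c = {1, 0, 1, 0}, only bit 0 of each element of __c survives the mask,
-   so the result is {0xFFFFFFFF+0+1, 1+1+0, 0+0+1, 0+0+0} = {0, 2, 1, 0}:
-   each lane computes a + b + (c & 1) with modular wraparound. */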
-
-/* vec_addec */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-
-  signed int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempa = (unsigned int) __a[i];
-    unsigned int __tempb = (unsigned int) __b[i];
-    unsigned int __tempc = (unsigned int) __c[i];
-    __tempc = __tempc & 0x00000001;
-    unsigned long long __longa = (unsigned long long) __tempa;
-    unsigned long long __longb = (unsigned long long) __tempb;
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (signed int) __tempres;
-  }
-
-  vector signed int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-
-  unsigned int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempc = __c[i] & 1;
-    unsigned long long __longa = (unsigned long long) __a[i];
-    unsigned long long __longb = (unsigned long long) __b[i];
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (signed int) __tempres;
-  }
-
-  vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
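-
-/* Worked example for the carry-out computation above (illustrative): for a
-   lane with __a[i] = 0xFFFFFFFF, __b[i] = 0 and __c[i] = 1, the 64-bit sum
-   is 0x100000000, so (__sum >> 32) & 0x01 is 1: the lane carries out. For
-   __a[i] = 1, __b[i] = 2, __c[i] = 0, the sum is 3 and the lane yields 0. */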
-
-#endif
-
-/* vec_vaddubm */
-
-#define __builtin_altivec_vaddubm vec_vaddubm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-/* vec_vadduhm */
-
-#define __builtin_altivec_vadduhm vec_vadduhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-/* vec_vadduwm */
-
-#define __builtin_altivec_vadduwm vec_vadduwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-/* vec_vaddfp */
-
-#define __builtin_altivec_vaddfp vec_vaddfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vaddfp(vector float __a, vector float __b) {
-  return __a + __b;
-}
-
-/* vec_addc */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector signed __int128)__builtin_altivec_vaddcuq(
-      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_vaddcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-/* vec_adds */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vaddsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vaddubs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vaddshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-/* vec_vadduhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vaddsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-/* vec_vadduws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-/* vec_vadduqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-
-/* vec_vaddeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-/* vec_vaddcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-/* vec_vaddecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_and */
-
-#define __builtin_altivec_vand vec_and
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_and(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_and(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_vand */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_andc */
-
-#define __builtin_altivec_vandc vec_andc
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_andc(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
-/* vec_vandc */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                                      vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                    vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                         vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
-/* vec_avg */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_avg(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
-/* vec_vavgsb */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vavgsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-/* vec_vavgub */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-/* vec_vavgsh */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vavgsh(vector short __a, vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-/* vec_vavguh */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-/* vec_vavgsw */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vavgsw(vector int __a, vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-/* vec_vavguw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
-/* vec_ceil */
-
-static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspip(__a);
-#else
-  return __builtin_altivec_vrfip(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
-  return __builtin_vsx_xvrdpip(__a);
-}
-#endif
-
-/* vec_vrfip */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfip(vector float __a) {
-  return __builtin_altivec_vrfip(__a);
-}
-
-/* vec_cmpb */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_cmpb(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_vcmpbfp */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vcmpbfp(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_cmpeq */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a,
-                                                         vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-/* vec_cmpne */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector bool int __a, vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-/* vec_cmpnez */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector signed int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector signed __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector signed long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
-#endif
-
-/* vec_cmpgt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
-}
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
-}
-#endif
-
-/* vec_cmpge */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector signed char __a, vector signed char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector signed short __a, vector signed short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector signed int __a, vector signed int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-/* vec_vcmpgefp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgefp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-}
-
-/* vec_vcmpgtsb */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-/* vec_vcmpgtub */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-/* vec_vcmpgtsh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtsh(vector short __a, vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-/* vec_vcmpgtuh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-/* vec_vcmpgtsw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtsw(vector int __a, vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-/* vec_vcmpgtuw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-/* vec_vcmpgtfp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtfp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-}
-
-/* vec_cmple */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector signed char __a, vector signed char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector signed short __a, vector signed short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector signed int __a, vector signed int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpge(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector double __a, vector double __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-/* vec_cmplt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector signed char __a, vector signed char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
-                                                           vector short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a,
-                                                         vector int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector double __a, vector double __b) {
-  return vec_cmpgt(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-/* vec_popcnt */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_popcnt(vector signed char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_popcnt(vector unsigned char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_popcnt(vector signed short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_popcnt(vector unsigned short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_popcnt(vector signed int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_popcnt(vector unsigned int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_popcnt(vector signed long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_popcnt(vector unsigned long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-
-/* vec_cntlz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cntlz(vector signed char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cntlz(vector unsigned char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cntlz(vector signed short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cntlz(vector unsigned short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cntlz(vector signed int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cntlz(vector unsigned int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cntlz(vector signed long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cntlz(vector unsigned long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-
-/* vec_cnttz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cnttz(vector signed char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cnttz(vector unsigned char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cnttz(vector signed short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cnttz(vector unsigned short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cnttz(vector signed int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cnttz(vector unsigned int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cnttz(vector signed long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cnttz(vector unsigned long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-
-/* vec_first_match_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_match_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) {
-  /* OR the equality comparison of the two vectors with comparisons of that
-     result against each operand. A lane is set either when the elements are
-     equal or when an element is zero, since a zero element equals the (zero)
-     comparison result in a mismatched lane.
-  */
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector signed char)__tmp1, __a) |
-                            vec_cmpeq((vector signed char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned char __a,
-                             vector unsigned char __b) {
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector unsigned char)__tmp1, __a) |
-                            vec_cmpeq((vector unsigned char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector signed short)__tmp1, __a) |
-                             vec_cmpeq((vector signed short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned short __a,
-                             vector unsigned short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector unsigned short)__tmp1, __a) |
-                             vec_cmpeq((vector unsigned short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) |
-                           vec_cmpeq((vector signed int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 |
-                           vec_cmpeq((vector unsigned int)__tmp1, __a) |
-                           vec_cmpeq((vector unsigned int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)__tmp2);
-#else
-    vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed char __a,
-                                vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned char __a,
-                                vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed short __a,
-                                vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned short __a,
-                                vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned int __a,
-                                vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
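-// Editor's note (illustrative sketch, not part of the original header):
-// the index helpers above reduce a lane-wise compare mask to an element
-// index by reinterpreting the 128-bit mask as two 64-bit halves, counting
-// zero bits from the low (little-endian) or high (big-endian) end, and
-// shifting by log2 of the element size; a result equal to the element
-// count means "not found".  A hedged usage example with a hypothetical
-// helper name:
-static __inline__ int __example_buffers_equal(vector unsigned char __x,
-                                              vector unsigned char __y) {
-  // 16 char elements per vector, so 16 means no lane mismatched.
-  return vec_first_mismatch_index(__x, __y) == 16;
-}
-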
-/* vec_insert_exp */
-
-static __inline__ vector double __ATTRS_o_ai
-vec_insert_exp(vector double __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_insert_exp(vector float __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp(__a, __b);
-}
-
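-// Editor's note (illustrative): each vec_insert_exp lane keeps the sign
-// and significand bits of __a and takes its biased exponent from the
-// low-order bits of the matching lane of __b (xviexpdp/xviexpsp).
-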
-#if defined(__powerpc64__)
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
-                                                             size_t __b) {
-  return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(unsigned char *__a, size_t __b) {
-  return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
-                                                              size_t __b) {
-  return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(unsigned short *__a, size_t __b) {
-  return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
-                                                            size_t __b) {
-  return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
-                                                              size_t __b) {
-  return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
-  return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(signed __int128 *__a, size_t __b) {
-  return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(unsigned __int128 *__a, size_t __b) {
-  return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(signed long long *__a, size_t __b) {
-  return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(unsigned long long *__a, size_t __b) {
-  return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
-                                                        size_t __b) {
-  return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
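-// Editor's sketch (not part of the original header): vec_xl_len loads only
-// the first __b bytes (0-16) from an arbitrarily aligned pointer, so it is
-// useful for buffer tails where a full 16-byte load could fault; the
-// unloaded lanes come back as zero.  Helper name is hypothetical:
-static __inline__ vector unsigned char
-__example_load_tail(const unsigned char *__p, size_t __n) {
-  return vec_xl_len((unsigned char *)__p, __n); // __n in 0..16
-}
-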
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(unsigned char *__a, size_t __b) {
-  vector unsigned char __res =
-      (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
-  __res = (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__res, (vector int)__res, __mask);
-#endif
-  return __res;
-}
-
-/* vec_xst_len */
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a,
-                                                unsigned char *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a,
-                                                signed char *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a,
-                                                signed short *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a,
-                                                unsigned short *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a,
-                                                signed int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a,
-                                                unsigned int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
-                                                signed __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
-                                                unsigned __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
-                                                signed long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a,
-                                                unsigned long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
-                                                  unsigned char *__b,
-                                                  size_t __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
-  vector unsigned char __res =
-      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
-  return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
-#else
-  return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56));
-#endif
-}
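-
-// Editor's sketch (not part of the original header): pairing vec_xl_len
-// with vec_xst_len gives a copy that never touches memory past __n bytes;
-// the _r variants above do the same through lxvll/stxvll plus an
-// endian-correcting permute.  Names are hypothetical:
-static __inline__ void __example_copy_tail(unsigned char *__dst,
-                                           unsigned char *__src,
-                                           size_t __n) {
-  vec_xst_len(vec_xl_len(__src, __n), __dst, __n); // __n in 0..16
-}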
-#endif
-#endif
-
-/* vec_cpsgn */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
-                                                      vector float __b) {
-  return __builtin_vsx_xvcpsgnsp(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
-                                                       vector double __b) {
-  return __builtin_vsx_xvcpsgndp(__a, __b);
-}
-#endif
-
-/* vec_ctf */
-
-#ifdef __VSX__
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
-#else
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)))
-#endif
-
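-// Editor's note (illustrative): for 64-bit inputs vec_ctf scales by
-// 2**(-__b) without loading a float constant: an IEEE-754 double with a
-// zero significand and biased exponent e encodes 2**(e - 1023), so the
-// bit pattern (0x3ffULL - __b) << 52 is exactly 2**(-__b).  A scalar
-// sketch of the same trick (hypothetical helper, well-defined via memcpy):
-static __inline__ double __example_pow2_neg(unsigned __b) {
-  unsigned long long __bits = (0x3ffULL - __b) << 52;
-  double __d;
-  __builtin_memcpy(&__d, &__bits, sizeof(__d));
-  return __d; // == 1.0 / (double)(1ULL << __b) for small __b
-}
-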
-/* vec_vcfux */
-
-#define vec_vcfux __builtin_altivec_vcfux
-
-/* vec_vcfsx */
-
-#define vec_vcfsx(__a, __b) __builtin_altivec_vcfsx((vector int)(__a), (__b))
-
-/* vec_cts */
-
-#ifdef __VSX__
-#define vec_cts(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
-           }))
-#else
-#define vec_cts __builtin_altivec_vctsxs
-#endif
-
-/* vec_vctsxs */
-
-#define vec_vctsxs __builtin_altivec_vctsxs
-
-/* vec_ctu */
-
-#ifdef __VSX__
-#define vec_ctu(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
-           }))
-#else
-#define vec_ctu __builtin_altivec_vctuxs
-#endif
-
-/* vec_vctuxs */
-
-#define vec_vctuxs __builtin_altivec_vctuxs
-
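-// Editor's note (illustrative): vec_cts/vec_ctu run the same exponent
-// trick in the opposite direction, scaling by 2**(+__b) via the bit
-// pattern (0x3ffULL + __b) << 52 before truncating to integer.
-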
-/* vec_signed */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int, vector signed int, const unsigned int __c);
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signed(vector float __a) {
-  return __builtin_convertvector(__a, vector signed int);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signed(vector double __a) {
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_signed2(vector double __a, vector double __b) {
-  return (vector signed int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsxws(__a);
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsxws(__a);
-#else
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
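-// Editor's note (illustrative): the "e"/"o" conversions place results in
-// the even- or odd-numbered word lanes.  xvcvdpsxws writes to fixed
-// register lanes, so whichever variant does not match the native
-// endianness rotates its result by 12 bytes with vec_sld to land the
-// values in the requested lanes.
-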
-/* vec_unsigned */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sld(vector unsigned int, vector unsigned int, const unsigned int __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsigned(vector float __a) {
-  return __builtin_convertvector(__a, vector unsigned int);
-}
-
-#ifdef __VSX__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_unsigned(vector double __a) {
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_unsigned2(vector double __a, vector double __b) {
-  return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpuxws(__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpuxws(__a);
-#else
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_float */
-
-static __inline__ vector float __ATTRS_o_ai
-vec_sld(vector float, vector float, const unsigned int __c);
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector signed int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector unsigned int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector signed long long __a, vector signed long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector double __a, vector double __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvsxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvuxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_double */
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector signed long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector unsigned long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvsxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvuxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvspdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector signed int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector unsigned int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector float __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector signed int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector unsigned int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector float __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(__a);
-#else
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(__a);
-#else
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(__a);
-#else
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#endif
-}
-#endif
-
-/* vec_div */
-
-/* Integer vector divides (vectors are scalarized, elements divided
-   and the vectors reassembled). */
-static __inline__ vector signed char __ATTRS_o_ai
-vec_div(vector signed char __a, vector signed char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_div(vector unsigned char __a, vector unsigned char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_div(vector signed short __a, vector signed short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_div(vector unsigned short __a, vector unsigned short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_div(vector signed int __a, vector signed int __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_div(vector unsigned int __a, vector unsigned int __b) {
-  return __a / __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_div(vector signed long long __a, vector signed long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_div(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a,
-                                                    vector float __b) {
-  return __a / __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
-                                                     vector double __b) {
-  return __a / __b;
-}
-#endif
-
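-// Editor's sketch (not part of the original header): the integer vec_div
-// overloads lower to plain element-wise C division, so a zero lane in the
-// divisor is undefined behavior exactly as for scalars.  Hypothetical
-// helper computing per-lane averages:
-static __inline__ vector unsigned int
-__example_average(vector unsigned int __sum, vector unsigned int __count) {
-  return vec_div(__sum, __count); // caller guarantees no zero lanes
-}
-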
-/* vec_dss */
-
-#define vec_dss __builtin_altivec_dss
-
-/* vec_dssall */
-
-static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
-  __builtin_altivec_dssall();
-}
-
-/* vec_dst */
-#define vec_dst(__PTR, __CW, __STR) \
-  __extension__(                    \
-      { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dstst */
-#define vec_dstst(__PTR, __CW, __STR) \
-  __extension__(                      \
-      { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dststt */
-#define vec_dststt(__PTR, __CW, __STR) \
-  __extension__(                       \
-      { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dstt */
-#define vec_dstt(__PTR, __CW, __STR) \
-  __extension__(                     \
-      { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_eqv */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_eqv(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                  (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                    (vector unsigned int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
-                                                        vector bool char __b) {
-  return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                (vector unsigned int)__b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_eqv(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                   (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_eqv(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_eqv(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xxleqv(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
-                                                       vector bool int __b) {
-  return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                               (vector unsigned int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a,
-                                                    vector float __b) {
-  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                            (vector unsigned int)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a,
-                                                     vector double __b) {
-  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                             (vector unsigned int)__b);
-}
-#endif
-
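-// Editor's note (illustrative): xxleqv is bitwise XNOR, so for every lane
-// type vec_eqv(__a, __b) equals ~(__a ^ __b); an all-ones lane means the
-// inputs agreed in every bit of that lane.
-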
-/* vec_expte */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_expte(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
-
-/* vec_vexptefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vexptefp(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
-
-/* vec_floor */
-
-static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspim(__a);
-#else
-  return __builtin_altivec_vrfim(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
-  return __builtin_vsx_xvrdpim(__a);
-}
-#endif
-
-/* vec_vrfim */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfim(vector float __a) {
-  return __builtin_altivec_vrfim(__a);
-}
-
-/* vec_ld */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a,
-                                                 const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
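-// Editor's note (illustrative): vec_ld maps to lvx, which clears the low
-// four bits of the effective address (pointer plus byte offset), so it can
-// only perform 16-byte-aligned loads; misaligned addresses are silently
-// rounded down.  Hedged sketch with a hypothetical name:
-static __inline__ vector float __example_load_aligned(const float *__p) {
-  return vec_ld(0, __p); // __p must be 16-byte aligned
-}
-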
-/* vec_lvx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvx(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvx(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvx(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-/* vec_lde */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lde(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lde(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lde(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lde(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_lvebx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvebx(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvebx(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-/* vec_lvehx */
-
-static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a,
-                                                      const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvehx(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-/* vec_lvewx */
-
-static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvewx(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
-                                                      const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_ldl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ldl(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ldl(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ldl(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_lvxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a,
-                                                   const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_loge */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_loge(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_vlogefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vlogefp(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_lvsl */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
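-// Editor's sketch (not part of the original header): on big-endian targets
-// the classic use of vec_lvsl is the two-aligned-loads-plus-permute
-// sequence for reading 16 unaligned bytes; the deprecation message above
-// says why plain assignment replaces it on little endian.  Hypothetical
-// helper:
-static __inline__ vector unsigned char
-__example_unaligned_load(const unsigned char *__p) {
-  vector unsigned char __lo = vec_ld(0, __p);     // block at or below __p
-  vector unsigned char __hi = vec_ld(15, __p);    // next aligned block
-  return vec_perm(__lo, __hi, vec_lvsl(0, __p));  // splice the 16 bytes
-}
-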
-/* vec_lvsr */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-/* vec_madd */
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector signed short, vector signed short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector signed short, vector signed short);
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
-
-static __inline__ vector signed short __ATTRS_o_ai vec_madd(
-    vector signed short __a, vector signed short __b, vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector signed short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector signed short __b,
-         vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaddasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmaddadp(__a, __b, __c);
-}
-#endif
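-
-/* Editor's illustrative sketch (not part of the original header): vec_madd
-   computes a[i]*b[i] + c[i] in each lane; on VSX targets the float/double
-   forms map to a single fused multiply-add. Hypothetical helper: */
-static __inline__ vector float example_fma(vector float __a, vector float __b,
-                                           vector float __c) {
-  return vec_madd(__a, __b, __c); /* a*b + c per lane */
-}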
-
-/* vec_vmaddfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-}
-
-/* vec_madds */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_madds(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
-
-/* vec_vmhaddshs */
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vmhaddshs(vector signed short __a, vector signed short __b,
-              vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
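-
-/* Editor's illustrative sketch (not part of the original header): vec_madds
-   is a truncating Q15 fixed-point multiply-accumulate; each lane computes
-   ((a*b) >> 15) + c with signed saturation. Hypothetical helper name: */
-static __inline__ vector signed short
-example_q15_mac(vector signed short __a, vector signed short __b,
-                vector signed short __acc) {
-  return vec_madds(__a, __b, __acc);
-}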
-
-/* vec_msub */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-  return __builtin_vsx_xvmsubasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmsubadp(__a, __b, __c);
-}
-#endif
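-
-/* Editor's illustrative sketch (not part of the original header): vec_msub
-   computes a[i]*b[i] - c[i] per lane as a fused multiply-subtract, e.g. for
-   a residual r = a*b - c. VSX only, like the overloads above: */
-#ifdef __VSX__
-static __inline__ vector double example_residual(vector double __a,
-                                                 vector double __b,
-                                                 vector double __c) {
-  return vec_msub(__a, __b, __c);
-}
-#endif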
-
-/* vec_max */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmaxdp(__a, __b);
-}
-#endif
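-
-/* Editor's illustrative sketch (not part of the original header): vec_max
-   selects the larger element per lane, e.g. clamping negatives to zero: */
-static __inline__ vector float example_relu(vector float __x) {
-  return vec_max(__x, (vector float)(0.0f));
-}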
-
-/* vec_vmaxsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-/* vec_vmaxub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vmaxsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-/* vec_vmaxuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vmaxsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-/* vec_vmaxuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vmaxfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaxfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-/* vec_mergeh */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergeh(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergeh(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergeh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-#endif
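-
-/* Editor's illustrative sketch (not part of the original header):
-   vec_mergeh interleaves the high-order halves of two vectors; for bytes
-   the result is {a0, b0, a1, b1, ..., a7, b7}. Hypothetical helper: */
-static __inline__ vector unsigned char
-example_zip_high(vector unsigned char __a, vector unsigned char __b) {
-  return vec_mergeh(__a, __b);
-}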
-
-/* vec_vmrghb */
-
-#define __builtin_altivec_vmrghb vec_vmrghb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrghb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrghb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrghb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-/* vec_vmrghh */
-
-#define __builtin_altivec_vmrghh vec_vmrghh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrghh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-/* vec_vmrghw */
-
-#define __builtin_altivec_vmrghw vec_vmrghw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrghw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-/* vec_mergel */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergel(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergel(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergel(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-#endif
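-
-/* Editor's illustrative sketch (not part of the original header):
-   vec_mergel is the low-half counterpart of vec_mergeh; together they
-   produce the complete interleave of two vectors, a common transpose
-   building block. Hypothetical helper: */
-static __inline__ void
-example_zip(vector unsigned char __a, vector unsigned char __b,
-            vector unsigned char *__lo, vector unsigned char *__hi) {
-  *__lo = vec_mergeh(__a, __b); /* interleaves bytes 0..7 of each input */
-  *__hi = vec_mergel(__a, __b); /* interleaves bytes 8..15 of each input */
-}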
-
-/* vec_vmrglb */
-
-#define __builtin_altivec_vmrglb vec_vmrglb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrglb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrglb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrglb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-/* vec_vmrglh */
-
-#define __builtin_altivec_vmrglh vec_vmrglh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrglh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-/* vec_vmrglw */
-
-#define __builtin_altivec_vmrglw vec_vmrglw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrglw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __POWER8_VECTOR__
-/* vec_mergee */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergee(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergee(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergee(vector bool long long __a, vector bool long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergee(vector signed long long __a, vector signed long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergee(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergee(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergee(vector double __a, vector double __b) {
-  return vec_mergeh(__a, __b);
-}
-
-/* vec_mergeo */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergeo(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeo(vector bool long long __a, vector bool long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeo(vector signed long long __a, vector signed long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergeo(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeo(vector double __a, vector double __b) {
-  return vec_mergel(__a, __b);
-}
-
-#endif
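-
-/* Editor's illustrative sketch (not part of the original header):
-   vec_mergee/vec_mergeo pick the even- and odd-indexed word elements,
-   alternating between both inputs:
-     vec_mergee(a, b) == {a[0], b[0], a[2], b[2]}
-     vec_mergeo(a, b) == {a[1], b[1], a[3], b[3]}
-   POWER8 only, like the overloads above: */
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed int
-example_even_words(vector signed int __a, vector signed int __b) {
-  return vec_mergee(__a, __b);
-}
-#endif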
-
-/* vec_mfvscr */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_mfvscr(void) {
-  return __builtin_altivec_mfvscr();
-}
-
-/* vec_min */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmindp(__a, __b);
-}
-#endif
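-
-/* Editor's illustrative sketch (not part of the original header): vec_min
-   pairs with vec_max to clamp every lane into [lo, hi]: */
-static __inline__ vector float example_clamp(vector float __x,
-                                             vector float __lo,
-                                             vector float __hi) {
-  return vec_min(vec_max(__x, __lo), __hi);
-}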
-
-/* vec_vminsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-/* vec_vminub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vminsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-/* vec_vminuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vminsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-/* vec_vminuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vminfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vminfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-/* vec_mladd */
-
-#define __builtin_altivec_vmladduhm vec_mladd
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * __b + __c;
-}
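-
-/* Editor's illustrative sketch (not part of the original header): vec_mladd
-   is a modular low-half multiply-add, i.e. (a*b + c) mod 2^16 per lane,
-   with no saturation. Hypothetical helper: */
-static __inline__ vector unsigned short
-example_mul_add_mod(vector unsigned short __a, vector unsigned short __b,
-                    vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}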
-
-/* vec_vmladduhm */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
-                                                          vector short __b,
-                                                          vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-/* vec_mradds */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_mradds(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_vmhraddshs */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_msum */
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a,
-                                                   vector unsigned char __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a,
-                                                   vector short __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
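-
-/* Editor's illustrative sketch (not part of the original header): vec_msum
-   folds four partial products into each word lane, so a dot product needs
-   only one multiply-sum per 16 bytes plus a final horizontal add.
-   Hypothetical loop over __n aligned 16-byte vectors: */
-static __inline__ unsigned int
-example_dot_u8(const vector unsigned char *__a,
-               const vector unsigned char *__b, int __n) {
-  vector unsigned int __acc = (vector unsigned int)(0);
-  for (int __i = 0; __i < __n; ++__i)
-    __acc = vec_msum(__a[__i], __b[__i], __acc);
-  return __acc[0] + __acc[1] + __acc[2] + __acc[3]; /* horizontal add */
-}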
-
-/* vec_vmsummbm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-/* vec_vmsumubm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-/* vec_vmsumshm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-/* vec_vmsumuhm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
-
-/* vec_msums */
-
-static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a,
-                                                    vector short __b,
-                                                    vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msums(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_vmsumshs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-/* vec_vmsumuhs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_mtvscr */
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-/* vec_mul */
-
-/* Integer vector multiplication is lowered to separate multiplications of
-   the odd and even elements; the results are truncated and merged into the
-   result vector.  */
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mul(vector signed char __a, vector signed char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mul(vector unsigned char __a, vector unsigned char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mul(vector signed short __a, vector signed short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mul(vector unsigned short __a, vector unsigned short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mul(vector signed int __a, vector signed int __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mul(vector unsigned int __a, vector unsigned int __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mul(vector signed long long __a, vector signed long long __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a * __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a,
-                                                    vector float __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a,
-                                                     vector double __b) {
-  return __a * __b;
-}
-#endif
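
For illustration, a minimal usage sketch (assuming a compiler with AltiVec
enabled, e.g. -maltivec; mul_demo is a hypothetical name, not part of the
header):

#include <altivec.h>

/* vec_mul is an ordinary element-wise, truncating multiply: each lane is
   computed independently and the product is truncated to the element
   width. */
static vector signed int mul_demo(void) {
  vector signed int a = {1, 2, 3, 4};
  vector signed int b = {10, 20, 30, 40};
  return vec_mul(a, b); /* {10, 40, 90, 160} */
}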
-
-/* The vmulos* and vmules* instructions have a big-endian bias, so
-   we must reverse the meaning of "even" and "odd" for little endian.  */
-
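A sketch of what this swap buys the caller (same assumptions as the vec_mul
sketch above; widen_even is a hypothetical name):

/* In the program's own element order, vec_mule always multiplies the
   even-numbered 16-bit lanes (0, 2, 4, 6) and widens each product to
   32 bits; on little endian this is achieved by emitting vmulosh rather
   than vmulesh. */
static vector int widen_even(vector short a, vector short b) {
  return vec_mule(a, b);
}
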
-/* vec_mule */
-
-static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mule(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosw(__a, __b);
-#else
-  return __builtin_altivec_vmulesw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouw(__a, __b);
-#else
-  return __builtin_altivec_vmuleuw(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vmulesb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulesb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-/* vec_vmuleub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-/* vec_vmulesh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulesh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-/* vec_vmuleuh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-/* vec_mulo */
-
-static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mulo(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesw(__a, __b);
-#else
-  return __builtin_altivec_vmulosw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuw(__a, __b);
-#else
-  return __builtin_altivec_vmulouw(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vmulosb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulosb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-/* vec_vmuloub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-/* vec_vmulosh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulosh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-/* vec_vmulouh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-/* vec_nand */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector signed int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
-                                                          vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_nand(vector float __a, vector float __b) {
-  return (vector float)(~((vector unsigned int)__a &
-                          (vector unsigned int)__b));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_nand(vector double __a, vector double __b) {
-  return (vector double)(~((vector unsigned long long)__a &
-                           (vector unsigned long long)__b));
-}
-
-#endif
-
-/* vec_nmadd */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-  return __builtin_vsx_xvnmaddasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmaddadp(__a, __b, __c);
-}
-#endif
-
-/* vec_nmsub */
-
-static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvnmsubasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmsubadp(__a, __b, __c);
-}
-#endif
-
-/* vec_vnmsubfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-}
-
-/* vec_nor */
-
-#define __builtin_altivec_vnor vec_nor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
-                                                        vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a,
-                                                    vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a,
-                                                  vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
-                                                       vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      ~((vector unsigned long long)__a | (vector unsigned long long)__b);
-  return (vector double)__res;
-}
-#endif
-
-/* vec_vnor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vnor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vnor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a,
-                                                     vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vnor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vnor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a,
-                                                   vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vnor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nor(vector signed long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nor(vector bool long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-#endif
-
-/* vec_or */
-
-#define __builtin_altivec_vor vec_or
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
-                                                         vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
-                                                       vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                   vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                        vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                 vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                      vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
-                                                    vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector signed int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
-                                                         vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector bool int __a, vector float __b) {
-  return (vector float)(__a | ~(vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector float __a, vector bool int __b) {
-  return (vector float)((vector unsigned int)__a | ~__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a | ~__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector double __b) {
-  return (vector double)(__a | ~(vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-/* vec_pack */
-
-/* The various vector pack instructions have a big-endian bias, so for
-   little endian we must handle reversed element numbering.  */
-
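A brief sketch of the modulo pack (narrow_demo is an illustrative name;
AltiVec enabled as above):

/* vec_pack keeps the low-order half of every element: two vectors of four
   32-bit ints become one vector of eight 16-bit shorts, with the inputs
   concatenated in order. */
static vector signed short narrow_demo(vector signed int a,
                                       vector signed int b) {
  return vec_pack(a, b); /* {(short)a[0..3], (short)b[0..3]} */
}
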
-static __inline__ vector signed char __ATTRS_o_ai
-vec_pack(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_pack(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a,
-                                                     vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
-                                                          vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_pack(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_pack(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_pack(vector double __a, vector double __b) {
-  return (vector float)(__a[0], __a[1], __b[0], __b[1]);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack_to_short_fp32(vector float __a, vector float __b) {
-  vector float __resa = __builtin_vsx_xvcvsphp(__a);
-  vector float __resb = __builtin_vsx_xvcvsphp(__b);
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_mergee(__resa, __resb);
-#else
-  return (vector unsigned short)vec_mergeo(__resa, __resb);
-#endif
-}
-
-#endif
-
-/* vec_vpkuhum */
-
-#define __builtin_altivec_vpkuhum vec_vpkuhum
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vpkuhum(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vpkuhum(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-/* vec_vpkuwum */
-
-#define __builtin_altivec_vpkuwum vec_vpkuwum
-
-static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a,
-                                                        vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vpkuwum(vector bool int __a, vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-/* vec_vpkudum */
-
-#ifdef __POWER8_VECTOR__
-#define __builtin_altivec_vpkudum vec_vpkudum
-
-static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vpkudum(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-#endif
-
-/* vec_packpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_packpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_vpkpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_packs */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a,
-                                                            vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a,
-                                                             vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a,
-                                                    vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkshss */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vpkshss(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-/* vec_vpksdss */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkuhus */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkudus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkswss */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vpkswss(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-/* vec_vpkuwus */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_packsu */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkshus */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkswus */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_vpksdus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpksdus(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_perm */
-
-// The vperm instruction is defined architecturally with a big-endian bias.
-// For little endian, we swap the input operands and invert the permute
-// control vector.  Only the rightmost 5 bits matter, so we could use
-// a vector of all 31s instead of all 255s to perform the inversion.
-// However, when the PCV is not a constant, using 255 has an advantage
-// in that the vec_xor can be recognized as a vec_nor (and for P8 and
-// later, possibly a vec_nand).
-
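A sketch of the inversion identity and a typical vec_perm use
(reverse_bytes is an illustrative name; AltiVec enabled as above):

/* Only the low 5 bits of each selector byte are consulted, and for
   x in [0, 31], (x ^ 0xFF) & 0x1F == 31 - x; so XOR-ing the permute
   control vector with all-ones turns big-endian byte indices into the
   equivalent little-endian ones. */
static vector unsigned char reverse_bytes(vector unsigned char v) {
  const vector unsigned char idx = {15, 14, 13, 12, 11, 10, 9, 8,
                                    7,  6,  5,  4,  3,  2,  1, 0};
  return vec_perm(v, v, idx); /* result[i] == v[15 - i] */
}
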
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                         (vector int)__a, __d);
-#else
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                         (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                       (vector int)__a, __d);
-#else
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                       (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                        (vector int)__a, __d);
-#else
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                        (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
-#else
-  return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_perm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                      (vector int)__a, __d);
-#else
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                      (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_perm(vector double __a, vector double __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
-                                                    (vector int)__a, __d);
-#else
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
-                                                    (vector int)__b, __c);
-#endif
-}
-#endif
-
-/* vec_vperm */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vperm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vperm(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vperm(
-    vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vperm(vector short __a, vector short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vperm(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vperm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai
-vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a,
-                                                    vector int __b,
-                                                    vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vperm(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vperm(vector float __a, vector float __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai vec_vperm(
-    vector long long __a, vector long long __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vperm(vector double __a, vector double __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-#endif
-
-/* vec_re */
-
-static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvresp(__a);
-#else
-  return __builtin_altivec_vrefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) {
-  return __builtin_vsx_xvredp(__a);
-}
-#endif
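-
-// vec_re (like vec_rsqrte below) returns an estimate; the AltiVec manuals
-// guarantee only on the order of 12 correct bits.  Where full precision
-// matters, the usual refinement is one Newton-Raphson step, sketched here
-// for the float reciprocal:
-//
-//   vector float e = vec_re(d);
-//   // e' = e + e * (1 - d * e) roughly doubles the number of correct bits
-//   e = vec_madd(e, vec_nmsub(d, e, (vector float){1, 1, 1, 1}), e);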
-
-/* vec_vrefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrefp(vector float __a) {
-  return __builtin_altivec_vrefp(__a);
-}
-
-/* vec_rl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_rl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a,
-                                                   vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a,
-                                                 vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-#endif
-
-/* vec_rlmi */
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlmi(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vrlwmi(__a, __c, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  return __builtin_altivec_vrldmi(__a, __c, __b);
-}
-
-/* vec_rlnm */
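-// The rotate-under-mask builtin takes a single control operand; this packs
-// the caller's two mask bounds into it, moving __c up one byte (OneByte is
-// a shift count of 8 bits) and or-ing in __b, so each element carries both
-// bounds for the corresponding element of __a.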
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlnm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int OneByte = { 0x8, 0x8, 0x8, 0x8 };
-  return __builtin_altivec_vrlwnm(__a, ((__c << OneByte) | __b));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  vector unsigned long long OneByte = { 0x8, 0x8 };
-  return __builtin_altivec_vrldnm(__a, ((__c << OneByte) | __b));
-}
-#endif
-
-/* vec_vrlb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vrlb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vrlb(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-/* vec_vrlh */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vrlh(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vrlh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-/* vec_vrlw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a,
-                                                   vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-/* vec_round */
-
-static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspi(__a);
-#else
-  return __builtin_altivec_vrfin(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-
-/* vec_rint */
-
-static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) {
-  return __builtin_vsx_xvrspic(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
-  return __builtin_vsx_xvrdpic(__a);
-}
-
-/* vec_nearbyint */
-
-static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
-  return __builtin_vsx_xvrspi(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-#endif
-
-/* vec_vrfin */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfin(vector float __a) {
-  return __builtin_altivec_vrfin(__a);
-}
-
-/* vec_sqrt */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
-  return __builtin_vsx_xvsqrtsp(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
-  return __builtin_vsx_xvsqrtdp(__a);
-}
-#endif
-
-/* vec_rsqrte */
-
-static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrsqrtesp(__a);
-#else
-  return __builtin_altivec_vrsqrtefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
-  return __builtin_vsx_xvrsqrtedp(__a);
-}
-#endif
-
-/* vec_vrsqrtefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrsqrtefp(vector float __a) {
-  return __builtin_altivec_vrsqrtefp(__a);
-}
-
-/* vec_sel */
-
-#define __builtin_altivec_vsel_4si vec_sel
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
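-
-// For illustration, per bit: where __c is 1 the result takes __b's bit and
-// where __c is 0 it keeps __a's.  With 4-bit values a = 0b1100, b = 0b1010,
-// c = 0b0110:
-//   (a & ~c) | (b & c) == (0b1100 & 0b1001) | (0b1010 & 0b0110)
-//                      == 0b1000 | 0b0010 == 0b1010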
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_sel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
-                                                        vector bool char __b,
-                                                        vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_sel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
-                                                       vector bool int __b,
-                                                       vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
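-// The float (and below, double) variants select on the raw bits through an
-// integer vector, since the bitwise operators are not defined for
-// floating-point vector types.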
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-#endif
-
-/* vec_vsel */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vsel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsel(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
-                                                         vector bool char __b,
-                                                         vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsel(vector short __a, vector short __b, vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a,
-                                                     vector short __b,
-                                                     vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vsel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
-                                                        vector bool int __b,
-                                                        vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-/* vec_sl */
-
-// vec_sl reduces each shift count in __b modulo the element width of __a
-// (in bits) first, so elements of __b may exceed that width.
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sl(vector unsigned char __a, vector unsigned char __b) {
-  return __a << (__b %
-                 (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
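-
-// Example of the modulo behaviour: for byte elements the count is taken
-// mod 8, so these are equivalent (vec_splat_u8 just builds a splatted
-// shift count):
-//   vec_sl(x, vec_splat_u8(9));  // 9 % 8 == 1
-//   vec_sl(x, vec_splat_u8(1));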
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sl((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sl(vector unsigned short __a, vector unsigned short __b) {
-  return __a << (__b % (vector unsigned short)(sizeof(unsigned short) *
-                                               __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sl((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sl(vector unsigned int __a, vector unsigned int __b) {
-  return __a << (__b %
-                 (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sl((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a << (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sl(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sl((vector unsigned long long)__a, __b);
-}
-#endif
-
-/* vec_vslb */
-
-#define __builtin_altivec_vslb vec_vslb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslb(vector signed char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslh */
-
-#define __builtin_altivec_vslh vec_vslh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vslh(vector short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslw */
-
-#define __builtin_altivec_vslw vec_vslw
-
-static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_sld */
-
-#define __builtin_altivec_vsldoi_4si vec_sld
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sld(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
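-
-// For illustration: vec_sld concatenates __a and __b and extracts 16 bytes
-// starting __c bytes in, so in element order
-//   vec_sld(a, b, 4) == {a[4], ..., a[15], b[0], ..., b[3]}
-// The & 0x0F above keeps the shift in the architectural 0-15 range.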
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sld(vector unsigned char __a, vector unsigned char __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sld(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sld(vector unsigned short __a, vector unsigned short __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a,
-                                                    vector pixel __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sld(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
-                                                       vector bool int __b,
-                                                       unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
-                                                    vector float __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_sld(vector bool long long __a, vector bool long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sld(vector signed long long __a, vector signed long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sld(vector unsigned long long __a, vector unsigned long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a,
-                                                     vector double __b,
-                                                     unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-#endif
-
-/* vec_sldw */
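-// vec_sldw shifts by whole words: __c is a word count, scaled to bytes by
-// the << 2 and reduced mod 16 by the & 0x0F, so only __c % 4 words take
-// effect.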
-static __inline__ vector signed char __ATTRS_o_ai vec_sldw(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sldw(vector unsigned char __a, vector unsigned char __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sldw(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sldw(vector unsigned short __a, vector unsigned short __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sldw(vector signed long long __a, vector signed long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-/* vec_slv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vslv(__a, __b);
-}
-
-/* vec_srv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsrv(__a, __b);
-}
-#endif
-
-/* vec_vsldoi */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi(
-    vector unsigned char __a, vector unsigned char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a,
-                                                       vector short __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi(
-    vector unsigned short __a, vector unsigned short __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a,
-                                                       vector pixel __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a,
-                                                     vector int __b,
-                                                     unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi(
-    vector unsigned int __a, vector unsigned int __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a,
-                                                       vector float __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-/* vec_sll */
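-// vec_sll shifts the whole 128-bit register left as a single quantity by
-// the bit count in the low three bits of __b; the ISA expects that count
-// to be replicated in every byte of __b.  The different __b widths below
-// change only the nominal type, not the behaviour.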
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sll(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsl((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
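-
-/* Added sketch (not part of the upstream header): vec_sll shifts the whole
-   128-bit register left by a bit count taken from __b; the AltiVec spec
-   expects every byte of __b to repeat the same 3-bit count, so the count
-   vector is normally a splat. The helper name below is hypothetical. */
-static __inline__ vector unsigned char __ATTRS_o_ai
-__vec_sll_demo(vector unsigned char __v) {
-  vector unsigned char __cnt = (vector unsigned char)(3); /* left by 3 bits */
-  return vec_sll(__v, __cnt);
-}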
-
-/* vec_vsl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_slo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
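-
-/* Added sketch (not part of the upstream header): vec_slo shifts the whole
-   register left by whole octets; the vslo instruction reads the octet count
-   from the bits above the lowest three of __b's last byte, so a count of N
-   bytes is encoded as N << 3. The helper name below is hypothetical. */
-static __inline__ vector unsigned char __ATTRS_o_ai
-__vec_slo_demo(vector unsigned char __v) {
-  vector unsigned char __cnt = (vector unsigned char)(2 << 3); /* 2 octets */
-  return vec_slo(__v, __cnt);
-}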
-
-/* vec_vslo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-/* vec_splat */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat(vector signed char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat(vector unsigned char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_splat(vector bool char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_splat(vector signed short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat(vector unsigned short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_splat(vector bool short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_splat(vector signed int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat(vector unsigned int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_splat(vector bool int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a,
-                                                       unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_splat(vector bool long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splat(vector signed long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splat(vector unsigned long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-#endif
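-
-/* Added sketch (not part of the upstream header): vec_splat replicates one
-   selected element across every lane; the index is masked to the element
-   count, as the permute-mask construction above shows. The helper name
-   below is hypothetical. */
-static __inline__ vector float __ATTRS_o_ai
-__vec_splat_demo(vector float __v) {
-  return vec_splat(__v, 2); /* all four lanes become element 2 of __v */
-}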
-
-/* vec_vspltb */
-
-#define __builtin_altivec_vspltb vec_vspltb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltb(vector signed char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vspltb(vector unsigned char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
-                                                           unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-/* vec_vsplth */
-
-#define __builtin_altivec_vsplth vec_vsplth
-
-static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsplth(vector unsigned short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsplth(vector bool short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-/* vec_vspltw */
-
-#define __builtin_altivec_vspltw vec_vspltw
-
-static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a,
-                                                     unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vspltw(vector unsigned int __a, unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
-                                                          unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a,
-                                                       unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-/* vec_splat_s8 */
-
-#define __builtin_altivec_vspltisb vec_splat_s8
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat_s8(signed char __a) {
-  return (vector signed char)(__a);
-}
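-
-/* Added sketch (not part of the upstream header): per the FIXME above, the
-   argument is meant to be a 5-bit signed literal, i.e. in -16..15, matching
-   the immediate range of the vspltisb instruction. The helper name below is
-   hypothetical. */
-static __inline__ vector signed char __ATTRS_o_ai
-__vec_splat_s8_demo(void) {
-  return vec_splat_s8(-7); /* every byte of the result is -7 */
-}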
-
-/* vec_vspltisb */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltisb(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_splat_s16 */
-
-#define __builtin_altivec_vspltish vec_splat_s16
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_vspltish */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_splat_s32 */
-
-#define __builtin_altivec_vspltisw vec_splat_s32
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_vspltisw */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_splat_u8 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat_u8(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-/* vec_splat_u16 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat_u16(signed char __a) {
-  return (vector unsigned short)(__a);
-}
-
-/* vec_splat_u32 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat_u32(signed char __a) {
-  return (vector unsigned int)(__a);
-}
-
-/* vec_sr */
-
-// vec_sr reduces each shift count in __b modulo the element bit width first,
-// so the counts in __b are allowed to exceed the width of __a's elements.
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sr(vector unsigned char __a, vector unsigned char __b) {
-  return __a >>
-         (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sr((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sr(vector unsigned short __a, vector unsigned short __b) {
-  return __a >>
-         (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sr((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sr(vector unsigned int __a, vector unsigned int __b) {
-  return __a >>
-         (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sr((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sr(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sr((vector unsigned long long)__a, __b);
-}
-#endif
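-
-/* Added sketch (not part of the upstream header): because the count is
-   reduced modulo the element width, a count of 9 on byte elements shifts by
-   9 % 8 == 1. The helper name below is hypothetical. */
-static __inline__ vector unsigned char __ATTRS_o_ai
-__vec_sr_modulo_demo(void) {
-  vector unsigned char __val = (vector unsigned char)(0x80);
-  vector unsigned char __cnt = (vector unsigned char)(9);
-  return vec_sr(__val, __cnt); /* each byte: 0x80 >> 1 == 0x40 */
-}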
-
-/* vec_vsrb */
-
-#define __builtin_altivec_vsrb vec_vsrb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrb(vector signed char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrh */
-
-#define __builtin_altivec_vsrh vec_vsrh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrh(vector short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrw */
-
-#define __builtin_altivec_vsrw vec_vsrw
-
-static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_sra */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sra(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sra(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a,
-                                                    vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sra(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a,
-                                                  vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sra(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sra(vector signed long long __a, vector unsigned long long __b) {
-  return __a >> __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)((vector signed long long)__a >> __b);
-}
-#endif
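-
-/* Added sketch (not part of the upstream header): vec_sra shifts right
-   arithmetically, replicating the sign bit, so negative elements stay
-   negative. The helper name below is hypothetical. */
-static __inline__ vector signed char __ATTRS_o_ai
-__vec_sra_demo(void) {
-  vector signed char __val = (vector signed char)(-8);
-  vector unsigned char __cnt = (vector unsigned char)(1);
-  return vec_sra(__val, __cnt); /* each byte: -8 >> 1 == -4 */
-}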
-
-/* vec_vsrab */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrab(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrab(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-/* vec_vsrah */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrah(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrah(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-/* vec_vsraw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a,
-                                                    vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsraw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-/* vec_srl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_srl(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsr((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
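-
-/* Added sketch (not part of the upstream header): vec_srl is the right-shift
-   counterpart of vec_sll; the whole 128-bit register moves right by a bit
-   count that every byte of __b is expected to repeat. The helper name below
-   is hypothetical. */
-static __inline__ vector unsigned char __ATTRS_o_ai
-__vec_srl_demo(vector unsigned char __v) {
-  vector unsigned char __cnt = (vector unsigned char)(4); /* right by 4 bits */
-  return vec_srl(__v, __cnt);
-}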
-
-/* vec_vsr */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_sro */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
-
-/* vec_vsro */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-/* vec_st */
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                           vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                           vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
-                                           vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                           vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b,
-                                           vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                           vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
-                                           vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
-                                           float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
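
A minimal usage sketch for the vec_st family above (illustrative only, assuming a PowerPC target built with -maltivec; store_example and buf are hypothetical names). stvx ignores the low four bits of the effective address, so __c + __b must be 16-byte aligned:

#include <altivec.h>

void store_example(void) {
  __attribute__((aligned(16))) signed char buf[16];
  vector signed char v = vec_splat_s8(7); /* all sixteen lanes = 7 */
  vec_st(v, 0, buf);                      /* store the full 16-byte vector at buf */
}
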
-
-/* vec_stvx */
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                             vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                             vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                             vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                             vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
-                                             vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
-                                             int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                             vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                             vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                             float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-/* vec_ste */
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
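
A sketch of vec_ste, the store-single-element form (illustrative only; ste_example and out are hypothetical). The element written is selected by the effective address modulo 16, so with an aligned base and offset 0 the first word of the vector lands at the pointer and the rest of memory is untouched:

#include <altivec.h>

void ste_example(void) {
  __attribute__((aligned(16))) int out[4] = {0, 0, 0, 0};
  vector int v = {1, 2, 3, 4};
  vec_ste(v, 0, &out[0]); /* effective address selects word element 0: out[0] becomes 1 */
}
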
-
-/* vec_stvebx */
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-/* vec_stvehx */
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-/* vec_stvewx */
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-/* vec_stl */
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b,
-                                            vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
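
The vec_stl family lowers to stvxl, which stores exactly like stvx but marks the touched cache block as least-recently-used; it is a transient-store hint for data that will not be reused soon, not a correctness difference.
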
-
-/* vec_stvxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_sub */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sub(vector signed long long __a, vector signed long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a,
-                                                     vector double __b) {
-  return __a - __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a,
-                                                    vector float __b) {
-  return __a - __b;
-}
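
A minimal usage sketch (hypothetical values, assuming -maltivec): vec_sub is the modular, element-wise subtraction, wrapping on overflow rather than saturating (contrast vec_subs further below):

#include <altivec.h>

void sub_example(void) {
  vector signed int a = {10, 20, 30, 40};
  vector signed int b = {1, 2, 3, 4};
  vector signed int d = vec_sub(a, b); /* {9, 18, 27, 36} */
  (void)d;
}
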
-
-/* vec_vsububm */
-
-#define __builtin_altivec_vsububm vec_vsububm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-/* vec_vsubuhm */
-
-#define __builtin_altivec_vsubuhm vec_vsubuhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-/* vec_vsubuwm */
-
-#define __builtin_altivec_vsubuwm vec_vsubuwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-/* vec_vsubfp */
-
-#define __builtin_altivec_vsubfp vec_vsubfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vsubfp(vector float __a, vector float __b) {
-  return __a - __b;
-}
-
-/* vec_subc */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_vsubcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
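
A sketch of the carry semantics (hypothetical values): per element, vec_subc yields the carry out of a + ~b + 1, i.e. 1 exactly when the unsigned subtraction a - b does not borrow:

#include <altivec.h>

void subc_example(void) {
  vector unsigned int a = {5, 5, 0, 0};
  vector unsigned int b = {3, 9, 0, 1};
  vector unsigned int c = vec_subc(a, b); /* {1, 0, 1, 0}: 1 wherever a[i] >= b[i] */
  (void)c;
}
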
-
-/* vec_subs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
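
A sketch contrasting vec_subs with vec_sub (hypothetical values): the saturating forms clamp at the type's limits instead of wrapping:

#include <altivec.h>

void subs_example(void) {
  vector unsigned int a = {5, 5, 0, 0};
  vector unsigned int b = {10, 1, 0, 1};
  vector unsigned int r = vec_subs(a, b); /* {0, 4, 0, 0}: 5 - 10 clamps to 0 */
  (void)r;
}
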
-
-/* vec_vsubsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vsububs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vsubshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-/* vec_vsubuhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vsubsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-/* vec_vsubuws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-/* vec_vsubuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-
-/* vec_vsubeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sube(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-/* vec_vsubcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-/* vec_vsubecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
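
The two 32-bit vec_subec forms above reuse vec_addec through the two's-complement identity a - b - 1 + c = a + ~b + c, returning the carry out of that addition. A scalar per-element model (illustrative only; subec_model is a hypothetical name):

/* carry-out of a + ~b + c: 1 when the extended subtraction does not borrow */
unsigned int subec_model(unsigned int a, unsigned int b, unsigned int c) {
  unsigned long long t = (unsigned long long)a + (unsigned int)~b + (c & 1u);
  return (unsigned int)(t >> 32);
}
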
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sube(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sube(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
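
Likewise, the generic vec_sube above folds the subtraction into vec_adde using the same identity; a scalar model of one element (illustrative only; sube_model is a hypothetical name):

/* a - b - 1 + carry, with carry restricted to {0, 1} */
unsigned int sube_model(unsigned int a, unsigned int b, unsigned int carry) {
  return a + ~b + (carry & 1u);
}
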
-
-/* vec_sum4s */
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sum4s(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
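
A usage sketch for the signed-char form of vec_sum4s (hypothetical values): each word of the result is the saturated sum of the corresponding four bytes of __a plus the matching word of __b:

#include <altivec.h>

void sum4s_example(void) {
  vector signed char a = {1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4};
  vector signed int b = {10, 20, 30, 40};
  vector signed int r = vec_sum4s(a, b); /* {14, 28, 42, 56} */
  (void)r;
}
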
-
-/* vec_vsum4sbs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4sbs(vector signed char __a, vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-/* vec_vsum4ubs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-/* vec_vsum4shs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4shs(vector signed short __a, vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
-
-/* vec_sum2s */
-
-/* The vsum2sws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian elements
-   1 and 3 (little-endian elements 0 and 2).  For ease of porting, the
-   programmer wants elements 1 and 3 in both cases, so for little
-   endian we must perform some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sum2s(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
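
A sketch of the resulting semantics (hypothetical values), which the permutes above keep identical on both endiannesses: elements 1 and 3 receive the saturated pairwise sums, elements 0 and 2 are zeroed:

#include <altivec.h>

void sum2s_example(void) {
  vector signed int a = {1, 2, 3, 4};
  vector signed int b = {0, 20, 0, 40};
  vector signed int r = vec_sum2s(a, b); /* {0, 1 + 2 + 20, 0, 3 + 4 + 40} = {0, 23, 0, 47} */
  (void)r;
}
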
-
-/* vec_vsum2sws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsum2sws(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
-
-/* vec_sums */
-
-/* The vsumsws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian element 3
-   (little-endian element 0).  For ease of porting, the programmer
-   wants element 3 in both cases, so for little endian we must perform
-   some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sums(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
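
Likewise for vec_sums (hypothetical values): the saturated total of all four elements of __a plus __b[3] lands in element 3 on either endianness:

#include <altivec.h>

void sums_example(void) {
  vector signed int a = {1, 2, 3, 4};
  vector signed int b = {0, 0, 0, 100};
  vector signed int r = vec_sums(a, b); /* {0, 0, 0, 110} */
  (void)r;
}
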
-
-/* vec_vsumsws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsumsws(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
-
-/* vec_trunc */
-
-static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspiz(__a);
-#else
-  return __builtin_altivec_vrfiz(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
-  return __builtin_vsx_xvrdpiz(__a);
-}
-#endif
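
A usage sketch (hypothetical values): vec_trunc rounds each lane toward zero, via xvrspiz when VSX is available and vrfiz otherwise:

#include <altivec.h>

void trunc_example(void) {
  vector float f = {1.9f, -1.9f, 0.5f, -0.5f};
  vector float t = vec_trunc(f); /* {1.0f, -1.0f, 0.0f, -0.0f} */
  (void)t;
}
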
-
-/* vec_vrfiz */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfiz(vector float __a) {
-  return __builtin_altivec_vrfiz(__a);
-}
-
-/* vec_unpackh */
-
-/* The vector unpack instructions all have a big-endian bias, so for
-   little endian we must reverse the meanings of "high" and "low."  */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackh(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
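
A sketch of the endian-corrected behavior (hypothetical values): on either endianness vec_unpackh sign-extends elements 0 through 7, and vec_unpackl (further below) handles the remaining eight:

#include <altivec.h>

void unpackh_example(void) {
  vector signed char c = {-1, 2, -3, 4, 5, 6, 7, 8,
                          9, 10, 11, 12, 13, 14, 15, 16};
  vector short h = vec_unpackh(c); /* {-1, 2, -3, 4, 5, 6, 7, 8}, sign-extended */
  (void)h;
}
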
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackh(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackh(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackh(vector float __a) {
-  return (vector double)(__a[0], __a[1]);
-}
-#endif
-
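-/* Editorial usage sketch (not part of the upstream header): widening the
-   first eight signed chars of a vector to shorts with sign extension.
-   The helper name __widen_high is hypothetical.  */
-static __inline__ vector short __widen_high(vector signed char __v) {
-  /* "High" means the low-numbered elements on both endiannesses; the
-     endian-specific builtin selection above hides the instruction bias. */
-  return vec_unpackh(__v);
-}
-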
-/* vec_vupkhsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupkhsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupkhsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-/* vec_vupkhsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupkhsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupkhsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-/* vec_vupkhsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupkhsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_unpackl */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackl(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackl(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackl(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackl(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackl(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackl(vector float __a) {
-  return (vector double)(__a[2], __a[3]);
-}
-#endif
-
-/* vec_vupklsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupklsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupklsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-/* vec_vupklsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupklsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupklsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-/* vec_vupklsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupklsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_vsx_ld */
-
-#ifdef __VSX__
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a,
-                                                       const float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed long long *__b) {
-  return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned long long *__b) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-#endif
-
-/* vec_vsx_st */
-
-#ifdef __VSX__
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               vector signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               vector float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a,
-                                               int __b,
-                                               vector signed long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a,
-                                               int __b,
-                                               vector unsigned long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               vector double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               vector signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-#endif
-
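-/* Editorial usage sketch (not part of the upstream header): a VSX
-   load/modify/store round trip.  The helper name __scale_floats is
-   hypothetical.  */
-#ifdef __VSX__
-static __inline__ void __scale_floats(float *__p, vector float __m) {
-  /* Unlike vec_ld, vec_vsx_ld does not force 16-byte alignment, so __p
-     may be an arbitrary float pointer.  */
-  vector float __v = vec_vsx_ld(0, __p);
-  vec_vsx_st(__v * __m, 0, __p);
-}
-#endif
-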
-#ifdef __VSX__
-#define vec_xxpermdi __builtin_vsx_xxpermdi
-#define vec_xxsldwi __builtin_vsx_xxsldwi
-#endif
-
-/* vec_xor */
-
-#define __builtin_altivec_vxor vec_xor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
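-/* Editorial usage sketch (not part of the upstream header): flipping the
-   sign of every float lane by XOR-ing the IEEE sign bits.  The helper
-   name __negate_v4f32 is hypothetical.  */
-static __inline__ vector float __negate_v4f32(vector float __v) {
-  /* Vector casts reinterpret bits, so this toggles only the sign bits. */
-  vector unsigned int __sign = (vector unsigned int)(0x80000000);
-  return vec_xor(__v, (vector float)__sign);
-}
-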
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xor(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vxor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-#endif
-
-/* ------------------------ extensions for CBEA ----------------------------- */
-
-/* vec_extract */
-
-static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
-                                                       int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai
-vec_extract(vector unsigned char __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
-                                                         int __b) {
-  return __a[__b];
-}
-
-static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai
-vec_extract(vector unsigned short __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
-                                                          int __b) {
-  return __a[__b];
-}
-
-static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
-                                                      int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-#ifdef __VSX__
-static __inline__ signed long long __ATTRS_o_ai
-vec_extract(vector signed long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector unsigned long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector bool long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
-  return __a[__b];
-}
-#endif
-
-static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
-  return __a[__b];
-}
-
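-/* Editorial usage sketch (not part of the upstream header): vec_extract
-   accepts a run-time index and simply lowers to a lane read.  The helper
-   name __lane_sum is hypothetical.  */
-static __inline__ signed int __lane_sum(vector signed int __v) {
-  signed int __s = 0;
-  int __i;
-  for (__i = 0; __i < 4; ++__i)
-    __s += vec_extract(__v, __i);
-  return __s;
-}
-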
-#ifdef __POWER9_VECTOR__
-
-#define vec_insert4b __builtin_vsx_insertword
-#define vec_extract4b __builtin_vsx_extractuword
-
-/* vec_extract_exp */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_exp(vector float __a) {
-  return __builtin_vsx_xvxexpsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_exp(vector double __a) {
-  return __builtin_vsx_xvxexpdp(__a);
-}
-
-/* vec_extract_sig */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_sig(vector float __a) {
-  return __builtin_vsx_xvxsigsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_sig(vector double __a) {
-  return __builtin_vsx_xvxsigdp(__a);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shorth(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shortl(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-#endif /* __POWER9_VECTOR__ */
-
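-/* Editorial usage sketch (not part of the upstream header): splitting
-   floats into their biased-exponent and significand fields.  The helper
-   name __frexp_fields is hypothetical.  */
-#ifdef __POWER9_VECTOR__
-static __inline__ void __frexp_fields(vector float __v,
-                                      vector unsigned int *__exp,
-                                      vector unsigned int *__sig) {
-  *__exp = vec_extract_exp(__v);
-  *__sig = vec_extract_sig(__v);
-}
-#endif
-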
-/* vec_insert */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_insert(signed char __a, vector signed char __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
-                                                           vector bool char __b,
-                                                           int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_insert(signed short __a, vector signed short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector bool short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_insert(signed int __a, vector signed int __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
-                                                          vector bool int __b,
-                                                          int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_insert(signed long long __a, vector signed long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
-                                                        vector double __b,
-                                                        int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
-                                                       vector float __b,
-                                                       int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
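-/* Editorial usage sketch (not part of the upstream header): vec_insert
-   returns an updated copy of the vector rather than writing through a
-   pointer.  The helper name __set_lane0 is hypothetical.  */
-static __inline__ vector float __set_lane0(vector float __v, float __x) {
-  return vec_insert(__x, __v, 0);
-}
-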
-/* vec_lvlx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvlxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
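-/* Editorial usage sketch (not part of the upstream header): the classic
-   CBEA idiom for an unaligned 16-byte load, OR-ing the left and right
-   pieces fetched by vec_lvlx and vec_lvrx.  The helper name
-   __load_unaligned is hypothetical.  */
-static __inline__ vector unsigned char
-__load_unaligned(const unsigned char *__p) {
-  return vec_or(vec_lvlx(0, __p), vec_lvrx(16, __p));
-}
-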
-/* vec_lvrxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_stvlx */
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvlxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrx */
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_promote */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
-                                                              int __b) {
-  vector signed char __res = (vector signed char)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_promote(unsigned char __a, int __b) {
-  vector unsigned char __res = (vector unsigned char)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
-  vector short __res = (vector short)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_promote(unsigned short __a, int __b) {
-  vector unsigned short __res = (vector unsigned short)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
-  vector int __res = (vector int)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
-                                                               int __b) {
-  vector unsigned int __res = (vector unsigned int)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
-  vector float __res = (vector float)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-/* vec_splats */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splats(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
-  return (vector short)(__a);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splats(unsigned short __a) {
-  return (vector unsigned short)(__a);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
-  return (vector int)(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splats(unsigned int __a) {
-  return (vector unsigned int)(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splats(signed long long __a) {
-  return (vector signed long long)(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splats(unsigned long long __a) {
-  return (vector unsigned long long)(__a);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_splats(signed __int128 __a) {
-  return (vector signed __int128)(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_splats(unsigned __int128 __a) {
-  return (vector unsigned __int128)(__a);
-}
-
-#endif
-
-static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) {
-  return (vector double)(__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) {
-  return (vector float)(__a);
-}
-
-/* ----------------------------- predicates --------------------------------- */
-
-/* vec_all_eq */
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_ge */
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
-}
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_gt */
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
-}
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_in */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_in(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
-}
-
-/* vec_all_le */
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_lt */
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_nan */
-
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
-}
-#endif
-
-/* vec_all_ne */
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
-}
-#endif
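/* A note on the predicate encodings used throughout this section: each *_p
   builtin returns a CR6 bit from the recording form of the vector compare.
   __CR6_LT tests "all lanes true", __CR6_EQ tests "all lanes false", and
   the _REV variants test the respective negation. vec_all_ne is therefore
   "the equality compare was false in every lane", while vec_any_ne later in
   this file is "the equality compare was not true in every lane". A minimal
   usage sketch, assuming <altivec.h> and -maltivec (hypothetical values): */

static inline int example_all_lanes_differ(void) {
  vector signed int x = {1, 2, 3, 4};
  vector signed int y = {5, 6, 7, 8};
  return vec_all_ne(x, y); /* nonzero: no lane of x equals that lane of y */
}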
-
-/* vec_all_nge */
-
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_ngt */
-
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_nle */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_nle(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
-}
-
-/* vec_all_nlt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_nlt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
-}
-
-/* vec_all_numeric */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_numeric(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
-}
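/* vec_all_nan and vec_all_numeric both compare __a against itself: an IEEE
   NaN is the only value for which x == x is false, so an all-false equality
   compare (__CR6_EQ) means every lane is NaN, while an all-true compare
   (__CR6_LT) means every lane is an ordinary number. A minimal sketch,
   assuming <altivec.h> and -maltivec: */

static inline int example_classify_nan(vector float v) {
  if (vec_all_numeric(v))
    return 0; /* no lane of v is NaN */
  if (vec_all_nan(v))
    return 2; /* every lane of v is NaN */
  return 1;   /* mixed: some lanes NaN, some numeric */
}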
-
-/* vec_any_eq */
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_ge */
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
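/* Note the operand swap in the integer vec_any_ge overloads above: AltiVec
   provides only greater-than recording compares for integers, so "some lane
   a >= b" is computed as "not every lane b > a" via __CR6_LT_REV with
   reversed operands (the float/double overloads can use the native ge
   compare instead). A sketch of the identity, assuming <altivec.h>,
   -maltivec, and the vec_all_gt overloads defined earlier in this header: */

static inline int example_ge_via_swapped_gt(vector signed int a,
                                            vector signed int b) {
  /* both predicates agree for these types, so this returns 1 */
  return vec_any_ge(a, b) == !vec_all_gt(b, a);
}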
-
-/* vec_any_gt */
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_le */
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-/* vec_any_lt */
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-/* vec_any_nan */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nan(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
-}
-
-/* vec_any_ne */
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-/* vec_any_nge */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nge(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
-}
-
-/* vec_any_ngt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_ngt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
-}
-
-/* vec_any_nle */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nle(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
-}
-
-/* vec_any_nlt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nlt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
-}
-
-/* vec_any_numeric */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_numeric(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
-}
-
-/* vec_any_out */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_out(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
-}
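/* vec_any_out builds on vcmpbfp ("compare bounds"): a lane is in bounds
   when -__b[i] <= __a[i] <= __b[i], and __CR6_EQ_REV reports whether any
   lane failed that test (a NaN lane also reads as out of bounds). A minimal
   range-check sketch, assuming <altivec.h> and -maltivec: */

static inline int example_exceeds_unit_range(vector float v) {
  const vector float bound = {1.0f, 1.0f, 1.0f, 1.0f};
  return vec_any_out(v, bound); /* nonzero if some lane is outside [-1, 1] */
}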
-
-/* Power 8 Crypto functions
-Note: We diverge from the current GCC implementation with regard
-to cryptography and related functions as follows:
-- Only the SHA and AES instructions and builtins are disabled by -mno-crypto
-- The remaining ones are only available on Power8 and up, so they
-  require -mpower8-vector
-The justification is that export regulations require Category:Vector.Crypto
-to be optional (i.e. compliant hardware may not provide support), so we
-need to be able to turn off support for those instructions. The remaining
-ones (currently controlled by -mcrypto for GCC) must still be provided on
-compliant hardware even if Vector.Crypto is not.
-*/
-#ifdef __CRYPTO__
-#define vec_sbox_be __builtin_altivec_crypto_vsbox
-#define vec_cipher_be __builtin_altivec_crypto_vcipher
-#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
-#define vec_ncipher_be __builtin_altivec_crypto_vncipher
-#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vsbox(vector unsigned long long __a) {
-  return __builtin_altivec_crypto_vsbox(__a);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipher(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipherlast(vector unsigned long long __a,
-                             vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipherlast(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipher(vector unsigned long long __a,
-                          vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipherlast(vector unsigned long long __a,
-                              vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipherlast(__a, __b);
-}
-
-#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
-#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
-
-#define vec_shasigma_be(X, Y, Z)                                               \
-  _Generic((X), vector unsigned int                                            \
-           : __builtin_crypto_vshasigmaw, vector unsigned long long            \
-           : __builtin_crypto_vshasigmad)((X), (Y), (Z))
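-
-/* For illustration only: a hypothetical helper (not part of the API) showing
-   how vec_shasigma_be dispatches via _Generic on the element type. */
-static __inline__ vector unsigned int
-__example_shasigma_w(vector unsigned int __x) {
-  /* 32-bit elements select __builtin_crypto_vshasigmaw; a
-     vector unsigned long long argument would select vshasigmad. */
-  return vec_shasigma_be(__x, 1, 0xf);
-}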
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool char __ATTRS_o_ai
-vec_permxor(vector bool char __a, vector bool char __b,
-            vector bool char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_permxor(vector signed char __a, vector signed char __b,
-            vector signed char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_permxor(vector unsigned char __a, vector unsigned char __b,
-            vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
-                          vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
-                          vector unsigned short __c) {
-  return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned long long __a,
-                          vector unsigned long long __b,
-                          vector unsigned long long __c) {
-  return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_crypto_vpmsumb(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_crypto_vpmsumh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_crypto_vpmsumw(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vpmsumd(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vgbbd(vector signed char __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
-}
-
-#define vec_pmsum_be __builtin_crypto_vpmsumb
-#define vec_gb __builtin_altivec_vgbbd
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vgbbd(vector unsigned char __a) {
-  return __builtin_altivec_vgbbd(__a);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
-}
-
-#ifdef __powerpc64__
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-#endif
-#endif
-
-/* vec_reve */
-
-static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-#endif
-
-/* vec_revb */
-static __inline__ vector bool char __ATTRS_o_ai
-vec_revb(vector bool char __a) {
-  return __a;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_revb(vector signed char __a) {
-  return __a;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_revb(vector unsigned char __a) {
-  return __a;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_revb(vector bool short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_revb(vector signed short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_revb(vector unsigned short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_revb(vector bool int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_revb(vector signed int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_revb(vector unsigned int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_revb(vector float __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_revb(vector bool long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_revb(vector signed long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_revb(vector unsigned long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_revb(vector double __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-#endif /* End __VSX__ */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_revb(vector signed __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector signed __int128)vec_perm((vector signed int)__a,
-                                          (vector signed int)__a,
-                                           __indices);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_revb(vector unsigned __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector unsigned __int128)vec_perm((vector signed int)__a,
-                                            (vector signed int)__a,
-                                             __indices);
-}
-#endif /* END __POWER8_VECTOR__ && __powerpc64__ */
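-
-/* For illustration only: a hypothetical sketch of vec_revb semantics; each
-   element has its bytes reversed independently. */
-static __inline__ vector unsigned int __example_revb_u32(void) {
-  vector unsigned int __v = {0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff};
-  /* Result: {0x33221100, 0x77665544, 0xbbaa9988, 0xffeeddcc}. */
-  return vec_revb(__v);
-}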
-
-/* vec_xl */
-
-typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
-typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
-typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
-typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
-typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
-typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
-typedef vector float unaligned_vec_float __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
-                                                     signed char *__ptr) {
-  return *(unaligned_vec_schar *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, unsigned char *__ptr) {
-  return *(unaligned_vec_uchar*)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
-                                                      signed short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sshort *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, unsigned short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ushort *)__addr;
-}
-
-static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
-                                                    signed int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sint *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
-                                                      unsigned int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_uint *)__addr;
-}
-
-static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
-                                               float *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_float *)__addr;
-}
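-
-/* For illustration only: a hypothetical sketch of an unaligned load through
-   the aligned(1) typedefs above; __buf + 1 needs no particular alignment. */
-static __inline__ vector unsigned char __example_xl(unsigned char *__buf) {
-  /* Loads 16 bytes starting at __buf + 1. */
-  return vec_xl(1, __buf);
-}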
-
-#ifdef __VSX__
-typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
-typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
-typedef vector double unaligned_vec_double __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, signed long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sll *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, unsigned long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ull *)__addr;
-}
-
-static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
-                                                double *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_double *)__addr;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
-typedef vector unsigned __int128 unaligned_vec_ui128
-    __attribute__((aligned(1)));
-static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, signed __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_si128 *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ui128 *)__addr;
-}
-#endif
-
-/* vec_xl_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed char *__ptr) {
-  vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned char *__ptr) {
-  vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector signed short  __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed short *__ptr) {
-  vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned short *__ptr) {
-  vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed int *__ptr) {
-  return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned int *__ptr) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, float *__ptr) {
-  return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed long long *__ptr) {
-  return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned long long *__ptr) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, double *__ptr) {
-  return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-#endif
-#else
-  #define vec_xl_be vec_xl
-#endif
-
-/* vec_xst */
-
-static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
-                                        signed long long __offset,
-                                        signed char *__ptr) {
-  *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
-                                        signed long long __offset,
-                                        unsigned char *__ptr) {
-  *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
-                                        signed long long __offset,
-                                        signed short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sshort *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
-                                        signed long long __offset,
-                                        unsigned short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ushort *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
-                                        signed long long __offset,
-                                        signed int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sint *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
-                                        signed long long __offset,
-                                        unsigned int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_uint *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector float __vec,
-                                        signed long long __offset,
-                                        float *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_float *)__addr = __vec;
-}
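-
-/* For illustration only: a hypothetical sketch of the matching unaligned
-   store; vec_xst mirrors vec_xl through the same aligned(1) typedefs. */
-static __inline__ void __example_xst(vector unsigned char __v,
-                                     unsigned char *__buf) {
-  /* Stores 16 bytes starting at __buf + 1, no alignment required. */
-  vec_xst(__v, 1, __buf);
-}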
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
-                                        signed long long __offset,
-                                        signed long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sll *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
-                                        signed long long __offset,
-                                        unsigned long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ull *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector double __vec,
-                                        signed long long __offset,
-                                        double *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_double *)__addr = __vec;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
-                                        signed long long __offset,
-                                        signed __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_si128 *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
-                                        signed long long __offset,
-                                        unsigned __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ui128 *)__addr = __vec;
-}
-#endif
-
-/* vec_xst_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec,
-                                               signed long long  __offset,
-                                               signed char *__ptr) {
-  vector signed char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
-                                               signed long long  __offset,
-                                               unsigned char *__ptr) {
-  vector unsigned char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
-                                               signed long long  __offset,
-                                               signed short *__ptr) {
-  vector signed short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
-                                               signed long long  __offset,
-                                               unsigned short *__ptr) {
-  vector unsigned short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
-                                               signed long long  __offset,
-                                               signed int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
-                                               signed long long  __offset,
-                                               unsigned int *__ptr) {
-  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
-                                               signed long long  __offset,
-                                               float *__ptr) {
-  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
-                                               signed long long  __offset,
-                                               signed long long *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
-                                               signed long long  __offset,
-                                               unsigned long long *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
-                                               signed long long  __offset,
-                                               double *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec,
-                                               signed long long  __offset,
-                                               signed __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec,
-                                               signed long long  __offset,
-                                               unsigned __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-#endif
-#else
-  #define vec_xst_be vec_xst
-#endif
-
-#ifdef __POWER9_VECTOR__
-#define vec_test_data_class(__a, __b)                                          \
-  _Generic(                                                                    \
-      (__a), vector float                                                      \
-      : (vector bool int)__builtin_vsx_xvtstdcsp((vector float)(__a), (__b)),  \
-        vector double                                                          \
-      : (vector bool long long)__builtin_vsx_xvtstdcdp((vector double)(__a),   \
-                                                       (__b)))
-
-#endif /* #ifdef __POWER9_VECTOR__ */
-
-static vector float __ATTRS_o_ai vec_neg(vector float __a) {
-  return -__a;
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_neg(vector double __a) {
-  return -__a;
-}
-
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
-  return -__a;
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) {
-  return -__a;
-}
-
-static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) {
-  return -__a;
-}
-
-static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) {
-  return -__a;
-}
-
-static vector float __ATTRS_o_ai vec_nabs(vector float __a) {
-  return - vec_abs(__a);
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
-  return - vec_abs(__a);
-}
-
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
-  return __builtin_altivec_vminsd(__a, -__a);
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) {
-  return __builtin_altivec_vminsw(__a, -__a);
-}
-
-static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
-  return __builtin_altivec_vminsh(__a, -__a);
-}
-
-static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
-  return __builtin_altivec_vminsb(__a, -__a);
-}
-#undef __ATTRS_o_ai
-
-#endif /* __ALTIVEC_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/arm_acle.h b/linux-x86/lib64/clang/11.0.1/include/arm_acle.h
deleted file mode 100644
index 596ea03..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/arm_acle.h
+++ /dev/null
@@ -1,678 +0,0 @@
-/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_ACLE_H
-#define __ARM_ACLE_H
-
-#ifndef __ARM_ACLE
-#error "ACLE intrinsics support not enabled."
-#endif
-
-#include <stdint.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
-/* 8.3 Memory barriers */
-#if !defined(_MSC_VER)
-#define __dmb(i) __builtin_arm_dmb(i)
-#define __dsb(i) __builtin_arm_dsb(i)
-#define __isb(i) __builtin_arm_isb(i)
-#endif
-
-/* 8.4 Hints */
-
-#if !defined(_MSC_VER)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
-  __builtin_arm_wfi();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
-  __builtin_arm_wfe();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
-  __builtin_arm_sev();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
-  __builtin_arm_sevl();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
-  __builtin_arm_yield();
-}
-#endif
-
-#if __ARM_32BIT_STATE
-#define __dbg(t) __builtin_arm_dbg(t)
-#endif
-
-/* 8.5 Swap */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__swp(uint32_t __x, volatile uint32_t *__p) {
-  uint32_t v;
-  do
-    v = __builtin_arm_ldrex(__p);
-  while (__builtin_arm_strex(__x, __p));
-  return v;
-}
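-
-/* For illustration only: __swp above is a classic exclusive-monitor retry
-   loop (ldrex reads the old value; strex fails and retries if another
-   observer touched the location). A hypothetical usage sketch: */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__example_swp_lock(volatile uint32_t *__lock) {
-  /* Atomically store 1 and receive the previous value. */
-  return __swp(1, __lock);
-}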
-
-/* 8.6 Memory prefetch intrinsics */
-/* 8.6.1 Data prefetch */
-#define __pld(addr) __pldx(0, 0, 0, addr)
-
-#if __ARM_32BIT_STATE
-#define __pldx(access_kind, cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, access_kind, 1)
-#else
-#define __pldx(access_kind, cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
-#endif
-
-/* 8.6.2 Instruction prefetch */
-#define __pli(addr) __plix(0, 0, addr)
-
-#if __ARM_32BIT_STATE
-#define __plix(cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, 0, 0)
-#else
-#define __plix(cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
-#endif
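-
-/* For illustration only: a hypothetical usage sketch of the prefetch
-   macros above. */
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
-__example_pld(const void *__addr) {
-  /* Plain data prefetch: read access, default cache level and policy. */
-  __pld(__addr);
-}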
-
-/* 8.7 NOP */
-#if !defined(_MSC_VER) || !defined(__aarch64__)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
-  __builtin_arm_nop();
-}
-#endif
-
-/* 9 DATA-PROCESSING INTRINSICS */
-/* 9.2 Miscellaneous data-processing intrinsics */
-/* ROR */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__ror(uint32_t __x, uint32_t __y) {
-  __y %= 32;
-  if (__y == 0)
-    return __x;
-  return (__x >> __y) | (__x << (32 - __y));
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rorll(uint64_t __x, uint32_t __y) {
-  __y %= 64;
-  if (__y == 0)
-    return __x;
-  return (__x >> __y) | (__x << (64 - __y));
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rorl(unsigned long __x, uint32_t __y) {
-#if __SIZEOF_LONG__ == 4
-  return __ror(__x, __y);
-#else
-  return __rorll(__x, __y);
-#endif
-}
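-
-/* For illustration only: a hypothetical worked example of the rotate
-   helpers above. Rotation counts are taken modulo the operand width,
-   so 40 behaves as 8. */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__example_ror(void) {
-  /* 0x000000FF rotated right by 8 moves the low byte to the top. */
-  return __ror(0x000000FFu, 40); /* yields 0xFF000000 */
-}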
-
-/* CLZ */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clz(uint32_t __t) {
-  return __builtin_clz(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__clzl(unsigned long __t) {
-  return __builtin_clzl(__t);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__clzll(uint64_t __t) {
-  return __builtin_clzll(__t);
-}
-
-/* CLS */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__cls(uint32_t __t) {
-  return __builtin_arm_cls(__t);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clsl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __builtin_arm_cls(__t);
-#else
-  return __builtin_arm_cls64(__t);
-#endif
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clsll(uint64_t __t) {
-  return __builtin_arm_cls64(__t);
-}
-
-/* REV */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rev(uint32_t __t) {
-  return __builtin_bswap32(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__revl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __builtin_bswap32(__t);
-#else
-  return __builtin_bswap64(__t);
-#endif
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__revll(uint64_t __t) {
-  return __builtin_bswap64(__t);
-}
-
-/* REV16 */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rev16(uint32_t __t) {
-  return __ror(__rev(__t), 16);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rev16ll(uint64_t __t) {
-  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rev16l(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-    return __rev16(__t);
-#else
-    return __rev16ll(__t);
-#endif
-}
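-
-/* For illustration only: a hypothetical worked example of __rev16, which
-   swaps the bytes within each halfword. */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__example_rev16(void) {
-  /* 0xAABBCCDD: halfwords AABB, CCDD become BBAA, DDCC. */
-  return __rev16(0xAABBCCDDu); /* yields 0xBBAADDCC */
-}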
-
-/* REVSH */
-static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
-__revsh(int16_t __t) {
-  return __builtin_bswap16(__t);
-}
-
-/* RBIT */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rbit(uint32_t __t) {
-  return __builtin_arm_rbit(__t);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
-  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
-         __builtin_arm_rbit(__t >> 32);
-#else
-  return __builtin_arm_rbit64(__t);
-#endif
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rbitl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __rbit(__t);
-#else
-  return __rbitll(__t);
-#endif
-}
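-
-/* For illustration only: a hypothetical worked example of bit reversal. */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__example_rbit(void) {
-  /* The lowest bit moves to the highest position. */
-  return __rbit(0x1u); /* yields 0x80000000 */
-}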
-
-/*
- * 9.3 16-bit multiplications
- */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulbb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulbb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulbt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulbt(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smultb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smultb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smultt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smultt(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulwb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulwb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulwt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulwt(__a, __b);
-}
-#endif
-
-/*
- * 9.4 Saturating intrinsics
- *
- * FIXME: Change guard to the corresponding __ARM_FEATURE flag when Q flag
- * intrinsics are implemented and the flag is enabled.
- */
-/* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
-#define __ssat(x, y) __builtin_arm_ssat(x, y)
-#define __usat(x, y) __builtin_arm_usat(x, y)
-#endif
-
-/* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qadd(int32_t __t, int32_t __v) {
-  return __builtin_arm_qadd(__t, __v);
-}
-
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qsub(int32_t __t, int32_t __v) {
-  return __builtin_arm_qsub(__t, __v);
-}
-
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qdbl(int32_t __t) {
-  return __builtin_arm_qadd(__t, __t);
-}
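-
-/* For illustration only: a hypothetical sketch of the saturating behaviour
-   above; INT32_MAX + 1 saturates instead of wrapping. */
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__example_qadd_sat(void) {
-  return __qadd(0x7FFFFFFF, 1); /* yields 0x7FFFFFFF */
-}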
-#endif
-
-/* 9.4.3 Accumulating multiplications */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlabb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlabb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlabt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlabt(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlatb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlatb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlatt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlatt(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlawb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlawb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlawt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlawt(__a, __b, __c);
-}
-#endif
-
-/* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
-#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
-#define __usat16(x, y) __builtin_arm_usat16(x, y)
-#endif
-
-/* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
-typedef int32_t int8x4_t;
-typedef int32_t int16x2_t;
-typedef uint32_t uint8x4_t;
-typedef uint32_t uint16x2_t;
-
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sxtab16(int16x2_t __a, int8x4_t __b) {
-  return __builtin_arm_sxtab16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sxtb16(int8x4_t __a) {
-  return __builtin_arm_sxtb16(__a);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__uxtab16(int16x2_t __a, int8x4_t __b) {
-  return __builtin_arm_uxtab16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__uxtb16(int8x4_t __a) {
-  return __builtin_arm_uxtb16(__a);
-}
-#endif
-
-/* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__sel(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_sel(__a, __b);
-}
-#endif
-
-/* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__qadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_qadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__qsub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_qsub8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__sadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_sadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__shadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_shadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__shsub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_shsub8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__ssub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_ssub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uhadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uhadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uhsub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uhsub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uqadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uqadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uqsub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uqsub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__usub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_usub8(__a, __b);
-}
-#endif
-
-/* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__usad8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_usad8(__a, __b);
-}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
-  return __builtin_arm_usada8(__a, __b, __c);
-}
-#endif
-
-/* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qsax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qsax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qsub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qsub16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_sadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_sasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shsax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shsax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shsub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shsub16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__ssax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_ssax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__ssub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_ssub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhsax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhsax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhsub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhsub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqsax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqsax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqsub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqsub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__usax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_usax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__usub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_usub16(__a, __b);
-}
-#endif
-
-/* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlad(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smladx(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlald(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlaldx(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlsd(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlsdx(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlsld(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlsldx(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smuad(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smuad(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smuadx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smuadx(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smusd(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smusd(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smusdx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smusdx(__a, __b);
-}
-#endif
-
-/* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32b(uint32_t __a, uint8_t __b) {
-  return __builtin_arm_crc32b(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32h(uint32_t __a, uint16_t __b) {
-  return __builtin_arm_crc32h(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32w(uint32_t __a, uint32_t __b) {
-  return __builtin_arm_crc32w(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32d(uint32_t __a, uint64_t __b) {
-  return __builtin_arm_crc32d(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cb(uint32_t __a, uint8_t __b) {
-  return __builtin_arm_crc32cb(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32ch(uint32_t __a, uint16_t __b) {
-  return __builtin_arm_crc32ch(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cw(uint32_t __a, uint32_t __b) {
-  return __builtin_arm_crc32cw(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cd(uint32_t __a, uint64_t __b) {
-  return __builtin_arm_crc32cd(__a, __b);
-}
-#endif
-
-/* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__jcvt(double __a) {
-  return __builtin_arm_jcvt(__a);
-}
-#endif
-
-/* 10.1 Special register intrinsics */
-#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
-#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
-#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
-#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
-#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
-#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
-#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
-#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
-#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
-#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
-
-/* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
-#define __arm_mte_create_random_tag(__ptr, __mask)  __builtin_arm_irg(__ptr, __mask)
-#define __arm_mte_increment_tag(__ptr, __tag_offset)  __builtin_arm_addg(__ptr, __tag_offset)
-#define __arm_mte_exclude_tag(__ptr, __excluded)  __builtin_arm_gmi(__ptr, __excluded)
-#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
-#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
-#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
-#endif
-
-/* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
-
-#define _TMFAILURE_REASON  0x00007fffu
-#define _TMFAILURE_RTRY    0x00008000u
-#define _TMFAILURE_CNCL    0x00010000u
-#define _TMFAILURE_MEM     0x00020000u
-#define _TMFAILURE_IMP     0x00040000u
-#define _TMFAILURE_ERR     0x00080000u
-#define _TMFAILURE_SIZE    0x00100000u
-#define _TMFAILURE_NEST    0x00200000u
-#define _TMFAILURE_DBG     0x00400000u
-#define _TMFAILURE_INT     0x00800000u
-#define _TMFAILURE_TRIVIAL 0x01000000u
-
-#define __tstart()        __builtin_arm_tstart()
-#define __tcommit()       __builtin_arm_tcommit()
-#define __tcancel(__arg)  __builtin_arm_tcancel(__arg)
-#define __ttest()         __builtin_arm_ttest()
-
-#endif /* __ARM_FEATURE_TME */
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* __ARM_ACLE_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/arm_mve.h b/linux-x86/lib64/clang/11.0.1/include/arm_mve.h
deleted file mode 100644
index 8a4b58e..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/arm_mve.h
+++ /dev/null
@@ -1,13787 +0,0 @@
-/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_MVE_H
-#define __ARM_MVE_H
-
-#if !__ARM_FEATURE_MVE
-#error "MVE support not enabled"
-#endif
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint16_t mve_pred16_t;
-typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
-typedef struct { int16x8_t val[2]; } int16x8x2_t;
-typedef struct { int16x8_t val[4]; } int16x8x4_t;
-typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
-typedef struct { int32x4_t val[2]; } int32x4x2_t;
-typedef struct { int32x4_t val[4]; } int32x4x4_t;
-typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
-typedef struct { int64x2_t val[2]; } int64x2x2_t;
-typedef struct { int64x2_t val[4]; } int64x2x4_t;
-typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
-typedef struct { int8x16_t val[2]; } int8x16x2_t;
-typedef struct { int8x16_t val[4]; } int8x16x4_t;
-typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
-typedef struct { uint16x8_t val[2]; } uint16x8x2_t;
-typedef struct { uint16x8_t val[4]; } uint16x8x4_t;
-typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
-typedef struct { uint32x4_t val[2]; } uint32x4x2_t;
-typedef struct { uint32x4_t val[4]; } uint32x4x4_t;
-typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
-typedef struct { uint64x2_t val[2]; } uint64x2x2_t;
-typedef struct { uint64x2_t val[4]; } uint64x2x4_t;
-typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;
-typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
-typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
-int64_t __arm_asrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
-uint64_t __arm_lsll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
-int32_t __arm_sqrshr(int32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
-int64_t __arm_sqrshrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
-int64_t __arm_sqrshrl_sat48(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
-int32_t __arm_sqshl(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
-int64_t __arm_sqshll(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
-int32_t __arm_srshr(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
-int64_t __arm_srshrl(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
-uint32_t __arm_uqrshl(uint32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
-uint64_t __arm_uqrshll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
-uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
-uint32_t __arm_uqshl(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
-uint64_t __arm_uqshll(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
-uint32_t __arm_urshr(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
-uint64_t __arm_urshrl(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t __arm_vabdq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t __arm_vabdq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t __arm_vabdq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t __arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t __arm_vandq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t __arm_vandq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t __arm_vandq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t __arm_vandq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t __arm_vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t __arm_vbicq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t __arm_vbicq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t __arm_vbicq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t __arm_vbicq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t __arm_vbicq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t __arm_vbicq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t __arm_vbicq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t __arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
-int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
-int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
-int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
-int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
-uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
-uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
-uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
-uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q)))
-mve_pred16_t __arm_vctp16q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q_m)))
-mve_pred16_t __arm_vctp16q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q)))
-mve_pred16_t __arm_vctp32q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q_m)))
-mve_pred16_t __arm_vctp32q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q)))
-mve_pred16_t __arm_vctp64q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q_m)))
-mve_pred16_t __arm_vctp64q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q)))
-mve_pred16_t __arm_vctp8q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q_m)))
-mve_pred16_t __arm_vctp8q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t __arm_vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t __arm_vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t __arm_vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t __arm_vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t __arm_vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t __arm_vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t __arm_vddupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t __arm_vddupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t __arm_vddupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t __arm_vddupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t __arm_vddupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t __arm_vddupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t __arm_vddupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t __arm_vddupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t __arm_vddupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t __arm_vddupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t __arm_vddupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t __arm_vddupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t __arm_vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t __arm_vddupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t __arm_vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t __arm_vddupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t __arm_vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t __arm_vddupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t __arm_vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t __arm_vddupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t __arm_vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t __arm_vddupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t __arm_vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t __arm_vddupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t __arm_vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t __arm_vdupq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t __arm_vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t __arm_vdupq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t __arm_vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t __arm_vdupq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t __arm_vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t __arm_vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t __arm_vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t __arm_vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t __arm_vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t __arm_vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s16)))
-int16x8_t __arm_vdupq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s32)))
-int32x4_t __arm_vdupq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s8)))
-int8x16_t __arm_vdupq_n_s8(int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u16)))
-uint16x8_t __arm_vdupq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u32)))
-uint32x4_t __arm_vdupq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u8)))
-uint8x16_t __arm_vdupq_n_u8(uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s16)))
-int16x8_t __arm_vdupq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s32)))
-int32x4_t __arm_vdupq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s8)))
-int8x16_t __arm_vdupq_x_n_s8(int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u16)))
-uint16x8_t __arm_vdupq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u32)))
-uint32x4_t __arm_vdupq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u8)))
-uint8x16_t __arm_vdupq_x_n_u8(uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t __arm_vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t __arm_vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t __arm_vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t __arm_vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t __arm_vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t __arm_vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t __arm_vdwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t __arm_vdwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t __arm_vdwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t __arm_vdwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t __arm_vdwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t __arm_vdwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t __arm_vdwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t __arm_vdwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t __arm_vdwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t __arm_vdwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t __arm_vdwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t __arm_vdwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t __arm_vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t __arm_vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t __arm_vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t __arm_vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t __arm_vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t __arm_vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t __arm_vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t __arm_vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t __arm_vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t __arm_vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t __arm_vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t __arm_vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t __arm_veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t __arm_veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t __arm_veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t __arm_veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t __arm_veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t __arm_veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t __arm_veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t __arm_veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t __arm_veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t __arm_veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t __arm_veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t __arm_veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t __arm_veorq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t __arm_veorq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t __arm_veorq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t __arm_veorq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t __arm_veorq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t __arm_veorq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t __arm_veorq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t __arm_veorq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t __arm_veorq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t __arm_veorq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t __arm_veorq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t __arm_veorq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t __arm_veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t __arm_veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t __arm_veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t __arm_veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t __arm_veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t __arm_veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t __arm_veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t __arm_veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t __arm_veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t __arm_veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t __arm_veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t __arm_veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t __arm_vgetq_lane_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t __arm_vgetq_lane(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t __arm_vgetq_lane_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t __arm_vgetq_lane(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t __arm_vgetq_lane_s64(int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t __arm_vgetq_lane(int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t __arm_vgetq_lane_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t __arm_vgetq_lane(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t __arm_vgetq_lane(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t __arm_vgetq_lane(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t __arm_vgetq_lane(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t __arm_vgetq_lane(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t __arm_vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t __arm_vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t __arm_vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t __arm_vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t __arm_vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t __arm_vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t __arm_vhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t __arm_vhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t __arm_vhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t __arm_vhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t __arm_vhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t __arm_vhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t __arm_vhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t __arm_vhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t __arm_vhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t __arm_vhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t __arm_vhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t __arm_vhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t __arm_vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t __arm_vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t __arm_vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t __arm_vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t __arm_vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t __arm_vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t __arm_vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t __arm_vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t __arm_vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t __arm_vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t __arm_vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t __arm_vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t __arm_vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t __arm_vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t __arm_vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t __arm_vhcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t __arm_vhcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t __arm_vhcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t __arm_vhcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t __arm_vhcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t __arm_vhcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t __arm_vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t __arm_vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t __arm_vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t __arm_vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t __arm_vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t __arm_vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t __arm_vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t __arm_vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t __arm_vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t __arm_vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t __arm_vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t __arm_vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t __arm_vhcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t __arm_vhcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t __arm_vhcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t __arm_vhcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t __arm_vhcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t __arm_vhcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t __arm_vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t __arm_vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t __arm_vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t __arm_vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t __arm_vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t __arm_vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t __arm_vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t __arm_vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t __arm_vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t __arm_vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t __arm_vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t __arm_vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t __arm_vhsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t __arm_vhsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t __arm_vhsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t __arm_vhsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t __arm_vhsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t __arm_vhsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t __arm_vhsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t __arm_vhsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t __arm_vhsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t __arm_vhsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t __arm_vhsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t __arm_vhsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t __arm_vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t __arm_vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t __arm_vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t __arm_vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t __arm_vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t __arm_vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t __arm_vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t __arm_vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t __arm_vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t __arm_vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t __arm_vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t __arm_vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t __arm_vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t __arm_vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t __arm_vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t __arm_vidupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t __arm_vidupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t __arm_vidupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t __arm_vidupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t __arm_vidupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t __arm_vidupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t __arm_vidupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t __arm_vidupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t __arm_vidupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t __arm_vidupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t __arm_vidupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t __arm_vidupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t __arm_vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t __arm_vidupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t __arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t __arm_viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t __arm_vld1q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t __arm_vld1q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t __arm_vld1q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t __arm_vld1q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t __arm_vld1q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t __arm_vld1q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t __arm_vld1q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t __arm_vld1q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t __arm_vld1q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t __arm_vld1q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t __arm_vld1q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t __arm_vld1q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t __arm_vld2q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t __arm_vld2q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t __arm_vld2q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t __arm_vld2q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t __arm_vld2q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t __arm_vld2q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t __arm_vld2q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t __arm_vld2q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t __arm_vld2q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t __arm_vld2q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t __arm_vld2q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t __arm_vld2q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t __arm_vld4q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t __arm_vld4q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t __arm_vld4q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t __arm_vld4q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t __arm_vld4q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t __arm_vld4q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t __arm_vld4q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t __arm_vld4q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t __arm_vld4q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t __arm_vld4q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t __arm_vld4q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t __arm_vld4q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
-int16x8_t __arm_vldrbq_s16(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
-int32x4_t __arm_vldrbq_s32(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
-int8x16_t __arm_vldrbq_s8(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
-uint16x8_t __arm_vldrbq_u16(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
-uint32x4_t __arm_vldrbq_u32(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
-uint8x16_t __arm_vldrbq_u8(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
-int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
-int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
-int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
-uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
-uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
-uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
-int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
-uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
-int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
-uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
-int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
-uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
-int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
-uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
-int16x8_t __arm_vldrhq_s16(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
-int32x4_t __arm_vldrhq_s32(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
-uint16x8_t __arm_vldrhq_u16(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
-uint32x4_t __arm_vldrhq_u32(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
-int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
-int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
-uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
-uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
-int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
-uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
-int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
-uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
-int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
-uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
-int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
-uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
-int32x4_t __arm_vldrwq_s32(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
-uint32x4_t __arm_vldrwq_u32(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
-int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
-uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t __arm_vmaxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t __arm_vmaxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t __arm_vmaxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t __arm_vmaxvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t __arm_vmaxvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t __arm_vmaxvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t __arm_vminq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t __arm_vminq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t __arm_vminq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t __arm_vminq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t __arm_vminvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t __arm_vminvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t __arm_vminvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t __arm_vminvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t __arm_vminvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t __arm_vminvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t __arm_vminvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t __arm_vminvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t __arm_vminvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t __arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t __arm_vmladavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t __arm_vmladavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t __arm_vmladavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t __arm_vmladavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t __arm_vmladavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t __arm_vmladavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t __arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t __arm_vmlaldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t __arm_vmlaldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t __arm_vmlaldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t __arm_vmlsdavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t __arm_vmlsdavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t __arm_vmlsdavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t __arm_vmlsdavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t __arm_vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t __arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t __arm_vmlsldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t __arm_vmlsldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t __arm_vmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t __arm_vmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t __arm_vmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t __arm_vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t __arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t __arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t __arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t __arm_vmulq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t __arm_vmulq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t __arm_vmulq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t __arm_vmulq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s16)))
-int16x8_t __arm_vmvnq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s32)))
-int32x4_t __arm_vmvnq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u16)))
-uint16x8_t __arm_vmvnq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u32)))
-uint32x4_t __arm_vmvnq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
-int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
-int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
-uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
-uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t __arm_vornq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t __arm_vornq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t __arm_vornq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t __arm_vorrq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t __arm_vorrq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t __arm_vorrq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t __arm_vorrq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t __arm_vorrq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t __arm_vorrq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t __arm_vorrq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t __arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t __arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpnot)))
-mve_pred16_t __arm_vpnot(mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t __arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t __arm_vqaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t __arm_vqaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t __arm_vqaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t __arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t __arm_vqrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t __arm_vqrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t __arm_vqrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t __arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t __arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t __arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t __arm_vqshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t __arm_vqshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t __arm_vqshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t __arm_vqshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t __arm_vqshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t __arm_vqshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t __arm_vqshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t __arm_vqshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t __arm_vqshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t __arm_vqshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t __arm_vqshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t __arm_vqshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t __arm_vqshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t __arm_vqshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t __arm_vqshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t __arm_vqshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t __arm_vqshluq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t __arm_vqshluq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t __arm_vqshluq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t __arm_vqsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t __arm_vqsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t __arm_vqsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t __arm_vqsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t __arm_vreinterpretq_s16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t __arm_vreinterpretq_s16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t __arm_vreinterpretq_s16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t __arm_vreinterpretq_s32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t __arm_vreinterpretq_s32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t __arm_vreinterpretq_s32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t __arm_vreinterpretq_s64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t __arm_vreinterpretq_s64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t __arm_vreinterpretq_s64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t __arm_vreinterpretq_s8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t __arm_vreinterpretq_s8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t __arm_vreinterpretq_s8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t __arm_vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t __arm_vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t __arm_vrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t __arm_vrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t __arm_vrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t __arm_vrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t __arm_vrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t __arm_vrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t __arm_vrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t __arm_vrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t __arm_vrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t __arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t __arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t __arm_vrshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t __arm_vrshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t __arm_vrshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t __arm_vrshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t __arm_vrshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t __arm_vrshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t __arm_vrshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t __arm_vrshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t __arm_vrshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t __arm_vrshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t __arm_vshllbq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t __arm_vshllbq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t __arm_vshllbq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t __arm_vshllbq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t __arm_vshllbq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t __arm_vshllbq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t __arm_vshllbq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t __arm_vshlltq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t __arm_vshlltq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t __arm_vshlltq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t __arm_vshlltq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t __arm_vshlltq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t __arm_vshlltq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t __arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t __arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t __arm_vshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t __arm_vshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t __arm_vshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t __arm_vshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t __arm_vshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t __arm_vshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t __arm_vshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t __arm_vshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t __arm_vshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t __arm_vshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t __arm_vshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t __arm_vshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t __arm_vshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t __arm_vshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t __arm_vshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t __arm_vshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t __arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t __arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t __arm_vshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t __arm_vshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t __arm_vshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t __arm_vshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t __arm_vshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t __arm_vshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t __arm_vshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t __arm_vshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t __arm_vshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t __arm_vsliq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t __arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t __arm_vsriq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void __arm_vst1q_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void __arm_vst1q(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void __arm_vst1q_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void __arm_vst1q(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void __arm_vst1q_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void __arm_vst1q(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void __arm_vst1q_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void __arm_vst1q(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void __arm_vst1q_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void __arm_vst1q(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void __arm_vst1q_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void __arm_vst1q(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void __arm_vst2q_s16(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void __arm_vst2q(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void __arm_vst2q_s32(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void __arm_vst2q(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void __arm_vst2q_s8(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void __arm_vst2q(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void __arm_vst2q(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void __arm_vst2q(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void __arm_vst2q(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void __arm_vst4q_s16(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void __arm_vst4q(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void __arm_vst4q_s32(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void __arm_vst4q(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void __arm_vst4q_s8(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void __arm_vst4q(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void __arm_vst4q(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void __arm_vst4q(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void __arm_vst4q(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void __arm_vstrbq_s16(int8_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void __arm_vstrbq(int8_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void __arm_vstrbq_s32(int8_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void __arm_vstrbq(int8_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void __arm_vstrbq_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void __arm_vstrbq(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void __arm_vstrbq(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void __arm_vstrbq(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void __arm_vstrbq(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void __arm_vstrhq_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void __arm_vstrhq(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void __arm_vstrhq_s32(int16_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void __arm_vstrhq(int16_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void __arm_vstrhq(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void __arm_vstrhq(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void __arm_vstrwq_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void __arm_vstrwq(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void __arm_vstrwq(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t __arm_vsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t __arm_vsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t __arm_vsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
-int16x8_t __arm_vuninitializedq(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
-int32x4_t __arm_vuninitializedq(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
-int64x2_t __arm_vuninitializedq(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
-int8x16_t __arm_vuninitializedq(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
-uint16x8_t __arm_vuninitializedq(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
-uint32x4_t __arm_vuninitializedq(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
-uint64x2_t __arm_vuninitializedq(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
-uint8x16_t __arm_vuninitializedq(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
-int16x8_t __arm_vuninitializedq_s16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
-int32x4_t __arm_vuninitializedq_s32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
-int64x2_t __arm_vuninitializedq_s64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
-int8x16_t __arm_vuninitializedq_s8();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
-uint16x8_t __arm_vuninitializedq_u16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
-uint32x4_t __arm_vuninitializedq_u32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
-uint64x2_t __arm_vuninitializedq_u64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
-uint8x16_t __arm_vuninitializedq_u8();
-
-#if (__ARM_FEATURE_MVE & 2)
-
-typedef __fp16 float16_t;
-typedef float float32_t;
-typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
-typedef struct { float16x8_t val[2]; } float16x8x2_t;
-typedef struct { float16x8_t val[4]; } float16x8x4_t;
-typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;
-typedef struct { float32x4_t val[2]; } float32x4x2_t;
-typedef struct { float32x4_t val[4]; } float32x4x4_t;
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t __arm_vabdq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t __arm_vabdq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t __arm_vaddq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t __arm_vaddq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t __arm_vandq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t __arm_vandq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t __arm_vbicq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t __arm_vbicq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t __arm_vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t __arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t __arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t __arm_vcmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t __arm_vcmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t __arm_vcmulq_rot180_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t __arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
-float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
-float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
-float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
-float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t __arm_vcvtq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t __arm_vcvtq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t __arm_vcvtq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t __arm_vcvtq_n_f32_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t __arm_vcvtq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
-int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
-int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
-uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
-uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t __arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
-int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
-int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
-uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
-uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
-float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
-float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f16)))
-float16x8_t __arm_vdupq_n_f16(float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f32)))
-float32x4_t __arm_vdupq_n_f32(float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f16)))
-float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f32)))
-float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t __arm_veorq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t __arm_veorq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t __arm_vgetq_lane_f16(float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t __arm_vgetq_lane(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t __arm_vgetq_lane_f32(float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t __arm_vgetq_lane(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t __arm_vld1q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t __arm_vld1q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t __arm_vld1q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t __arm_vld1q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t __arm_vld2q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t __arm_vld2q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t __arm_vld2q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t __arm_vld2q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t __arm_vld4q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t __arm_vld4q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t __arm_vld4q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t __arm_vld4q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
-float16x8_t __arm_vldrhq_f16(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
-float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
-float32x4_t __arm_vldrwq_f32(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
-float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
-float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
-float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
-float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
-float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t __arm_vmaxnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t __arm_vminnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t __arm_vminnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t __arm_vmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t __arm_vmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t __arm_vornq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t __arm_vornq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t __arm_vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t __arm_vorrq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t __arm_vorrq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t __arm_vreinterpretq_f16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t __arm_vreinterpretq_f16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t __arm_vreinterpretq_f16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t __arm_vreinterpretq_f16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t __arm_vreinterpretq_f16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t __arm_vreinterpretq_f16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t __arm_vreinterpretq_f16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t __arm_vreinterpretq_f16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t __arm_vreinterpretq_f16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t __arm_vreinterpretq_f32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t __arm_vreinterpretq_f32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t __arm_vreinterpretq_f32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t __arm_vreinterpretq_f32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t __arm_vreinterpretq_f32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t __arm_vreinterpretq_f32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t __arm_vreinterpretq_f32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t __arm_vreinterpretq_f32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t __arm_vreinterpretq_f32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t __arm_vreinterpretq_s16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t __arm_vreinterpretq_s16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t __arm_vreinterpretq_s32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t __arm_vreinterpretq_s32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t __arm_vreinterpretq_s64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t __arm_vreinterpretq_s64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t __arm_vreinterpretq_s8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t __arm_vreinterpretq_s8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t __arm_vreinterpretq_u16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t __arm_vreinterpretq_u16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t __arm_vreinterpretq_u32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t __arm_vreinterpretq_u32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t __arm_vreinterpretq_u64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t __arm_vreinterpretq_u64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void __arm_vst1q_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void __arm_vst1q(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void __arm_vst1q_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void __arm_vst1q(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void __arm_vst2q_f16(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void __arm_vst2q(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void __arm_vst2q_f32(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void __arm_vst2q(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void __arm_vst4q_f16(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void __arm_vst4q(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void __arm_vst4q_f32(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void __arm_vst4q(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void __arm_vstrhq_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void __arm_vstrhq(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void __arm_vstrwq_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void __arm_vstrwq(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t __arm_vsubq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t __arm_vsubq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
-float16x8_t __arm_vuninitializedq_f16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
-float32x4_t __arm_vuninitializedq_f32();
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
-float16x8_t __arm_vuninitializedq(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
-float32x4_t __arm_vuninitializedq(float32x4_t);
-
-#endif /* (__ARM_FEATURE_MVE & 2) */
-
-#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
-int64_t asrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
-uint64_t lsll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
-int32_t sqrshr(int32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
-int64_t sqrshrl(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
-int64_t sqrshrl_sat48(int64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
-int32_t sqshl(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
-int64_t sqshll(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
-int32_t srshr(int32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
-int64_t srshrl(int64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
-uint32_t uqrshl(uint32_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
-uint64_t uqrshll(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
-uint64_t uqrshll_sat48(uint64_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
-uint32_t uqshl(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
-uint64_t uqshll(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
-uint32_t urshr(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
-uint64_t urshrl(uint64_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s16)))
-uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s32)))
-uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_s8)))
-uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u16)))
-uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u32)))
-uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_p_u8)))
-uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s16)))
-uint32_t vabavq(uint32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t vabavq_s32(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s32)))
-uint32_t vabavq(uint32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_s8)))
-uint32_t vabavq(uint32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u16)))
-uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u32)))
-uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabavq_u8)))
-uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s16)))
-int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s32)))
-int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_s8)))
-int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u16)))
-uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u32)))
-uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_u8)))
-uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t vabdq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s16)))
-int16x8_t vabdq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t vabdq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s32)))
-int32x4_t vabdq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t vabdq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_s8)))
-int8x16_t vabdq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u16)))
-uint16x8_t vabdq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u32)))
-uint32x4_t vabdq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_u8)))
-uint8x16_t vabdq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s16)))
-int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s32)))
-int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_s8)))
-int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u16)))
-uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u32)))
-uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_u8)))
-uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
-int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
-uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
-int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
-uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
-int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
-uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
-int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
-uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
-int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
-int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
-int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
-uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
-uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
-uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t vaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
-int16x8_t vaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t vaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
-int32x4_t vaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t vaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
-int8x16_t vaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
-uint16x8_t vaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
-uint32x4_t vaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
-uint8x16_t vaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s16)))
-int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s32)))
-int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_s8)))
-int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u16)))
-uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u32)))
-uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_u8)))
-uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s16)))
-int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s32)))
-int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_s8)))
-int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u16)))
-uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u32)))
-uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_u8)))
-uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t vandq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s16)))
-int16x8_t vandq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t vandq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s32)))
-int32x4_t vandq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t vandq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_s8)))
-int8x16_t vandq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t vandq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u16)))
-uint16x8_t vandq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t vandq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u32)))
-uint32x4_t vandq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t vandq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_u8)))
-uint8x16_t vandq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s16)))
-int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s32)))
-int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_s8)))
-int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u16)))
-uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u32)))
-uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_u8)))
-uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s16)))
-int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_s32)))
-int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u16)))
-uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_n_u32)))
-uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s16)))
-int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s32)))
-int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_s8)))
-int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u16)))
-uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u32)))
-uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_u8)))
-uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t vbicq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s16)))
-int16x8_t vbicq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t vbicq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_s32)))
-int32x4_t vbicq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u16)))
-uint16x8_t vbicq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_n_u32)))
-uint32x4_t vbicq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t vbicq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s16)))
-int16x8_t vbicq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t vbicq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s32)))
-int32x4_t vbicq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t vbicq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_s8)))
-int8x16_t vbicq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u16)))
-uint16x8_t vbicq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u32)))
-uint32x4_t vbicq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_u8)))
-uint8x16_t vbicq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s16)))
-int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s32)))
-int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_s8)))
-int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u16)))
-uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u32)))
-uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_u8)))
-uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
-int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
-int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
-int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
-uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
-uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
-uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
-int16x8_t vcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t vcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
-int32x4_t vcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
-int8x16_t vcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
-uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
-uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
-uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
-int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
-int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
-int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
-uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
-uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
-uint8x16_t vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
-int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
-int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
-int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
-uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
-uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
-uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
-int16x8_t vcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
-int32x4_t vcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
-int8x16_t vcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
-uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
-uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
-uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
-int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
-int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
-int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
-uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
-uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
-uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
-mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
-mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
-mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
-mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
-mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
-mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
-mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
-mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
-mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
-mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
-mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
-mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
-mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
-mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
-mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
-mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
-mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
-mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
-mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
-mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
-mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
-mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
-mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
-mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
-mve_pred16_t vcmpeqq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
-mve_pred16_t vcmpeqq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
-mve_pred16_t vcmpeqq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
-mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
-mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
-mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
-mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
-mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
-mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
-mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
-mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
-mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
-mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
-mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
-mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
-mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
-mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
-mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
-mve_pred16_t vcmpgeq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
-mve_pred16_t vcmpgeq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
-mve_pred16_t vcmpgeq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
-mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
-mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
-mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
-mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
-mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
-mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
-mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
-mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
-mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
-mve_pred16_t vcmpgtq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
-mve_pred16_t vcmpgtq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
-mve_pred16_t vcmpgtq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
-mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
-mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
-mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
-mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
-mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
-mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
-mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
-mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
-mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
-mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
-mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
-mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
-mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
-mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
-mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
-mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
-mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
-mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
-mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
-mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
-mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
-mve_pred16_t vcmpleq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
-mve_pred16_t vcmpleq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
-mve_pred16_t vcmpleq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
-mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
-mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
-mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
-mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
-mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
-mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
-mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
-mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
-mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
-mve_pred16_t vcmpltq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
-mve_pred16_t vcmpltq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
-mve_pred16_t vcmpltq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
-mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
-mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
-mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
-mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
-mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
-mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
-mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
-mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
-mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
-mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
-mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
-mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
-mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
-mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
-mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
-mve_pred16_t vcmpneq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
-mve_pred16_t vcmpneq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
-mve_pred16_t vcmpneq(int8x16_t, int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
-mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
-mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
-mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
-mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
-mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
-mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
-mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
-mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
-mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
-int16x8_t vcreateq_s16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
-int32x4_t vcreateq_s32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
-int64x2_t vcreateq_s64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
-int8x16_t vcreateq_s8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
-uint16x8_t vcreateq_u16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
-uint32x4_t vcreateq_u32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
-uint64x2_t vcreateq_u64(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
-uint8x16_t vcreateq_u8(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q)))
-mve_pred16_t vctp16q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp16q_m)))
-mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q)))
-mve_pred16_t vctp32q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp32q_m)))
-mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q)))
-mve_pred16_t vctp64q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp64q_m)))
-mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q)))
-mve_pred16_t vctp8q(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vctp8q_m)))
-mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u16)))
-uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u32)))
-uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_n_u8)))
-uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
-uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
-uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
-uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t vddupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u16)))
-uint16x8_t vddupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t vddupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u32)))
-uint32x4_t vddupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t vddupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_n_u8)))
-uint8x16_t vddupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t vddupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u16)))
-uint16x8_t vddupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t vddupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u32)))
-uint32x4_t vddupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t vddupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_wb_u8)))
-uint8x16_t vddupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u16)))
-uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u32)))
-uint32x4_t vddupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_n_u8)))
-uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
-uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
-uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
-uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s16)))
-int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s32)))
-int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_s8)))
-int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u16)))
-uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u32)))
-uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_u8)))
-uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s16)))
-int16x8_t vdupq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s32)))
-int32x4_t vdupq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_s8)))
-int8x16_t vdupq_n_s8(int8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u16)))
-uint16x8_t vdupq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u32)))
-uint32x4_t vdupq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_u8)))
-uint8x16_t vdupq_n_u8(uint8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s16)))
-int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s32)))
-int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_s8)))
-int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u16)))
-uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u32)))
-uint32x4_t vdupq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_u8)))
-uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
-uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
-uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
-uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
-uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
-uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
-uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u16)))
-uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u32)))
-uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_n_u8)))
-uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
-uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
-uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
-uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
-uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
-uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
-uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
-uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
-uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
-uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s16)))
-int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s32)))
-int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_s8)))
-int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u16)))
-uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u32)))
-uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_u8)))
-uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t veorq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s16)))
-int16x8_t veorq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t veorq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s32)))
-int32x4_t veorq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t veorq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_s8)))
-int8x16_t veorq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t veorq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u16)))
-uint16x8_t veorq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t veorq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u32)))
-uint32x4_t veorq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t veorq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_u8)))
-uint8x16_t veorq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s16)))
-int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s32)))
-int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_s8)))
-int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u16)))
-uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u32)))
-uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_u8)))
-uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t vgetq_lane_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
-int16_t vgetq_lane(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t vgetq_lane_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
-int32_t vgetq_lane(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t vgetq_lane_s64(int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
-int64_t vgetq_lane(int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t vgetq_lane_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
-int8_t vgetq_lane(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t vgetq_lane_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
-uint16_t vgetq_lane(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t vgetq_lane_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
-uint32_t vgetq_lane(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t vgetq_lane_u64(uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
-uint64_t vgetq_lane(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t vgetq_lane_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
-uint8_t vgetq_lane(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s16)))
-int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s32)))
-int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_s8)))
-int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u16)))
-uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u32)))
-uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_m_u8)))
-uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t vhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s16)))
-int16x8_t vhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t vhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s32)))
-int32x4_t vhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t vhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_s8)))
-int8x16_t vhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u16)))
-uint16x8_t vhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u32)))
-uint32x4_t vhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_u8)))
-uint8x16_t vhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s16)))
-int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s32)))
-int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_s8)))
-int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u16)))
-uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u32)))
-uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhaddq_x_u8)))
-uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
-int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
-int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
-int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
-int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t vhcaddq_rot270_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
-int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
-int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
-int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
-int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
-int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
-int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
-int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
-int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
-int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
-int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
-int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
-int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
-int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
-int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s16)))
-int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s32)))
-int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_s8)))
-int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u16)))
-uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u32)))
-uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_m_u8)))
-uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t vhsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s16)))
-int16x8_t vhsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t vhsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s32)))
-int32x4_t vhsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t vhsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_s8)))
-int8x16_t vhsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u16)))
-uint16x8_t vhsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u32)))
-uint32x4_t vhsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_u8)))
-uint8x16_t vhsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s16)))
-int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s32)))
-int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_s8)))
-int8x16_t vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u16)))
-uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u32)))
-uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vhsubq_x_u8)))
-uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u16)))
-uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u32)))
-uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_n_u8)))
-uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
-uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
-uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
-uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t vidupq_n_u16(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u16)))
-uint16x8_t vidupq_u16(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t vidupq_n_u32(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u32)))
-uint32x4_t vidupq_u32(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t vidupq_n_u8(uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_n_u8)))
-uint8x16_t vidupq_u8(uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t vidupq_wb_u16(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u16)))
-uint16x8_t vidupq_u16(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t vidupq_wb_u32(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u32)))
-uint32x4_t vidupq_u32(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t vidupq_wb_u8(uint32_t *, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_wb_u8)))
-uint8x16_t vidupq_u8(uint32_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u16)))
-uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u32)))
-uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_n_u8)))
-uint8x16_t vidupq_x_u8(uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
-uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
-uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
-uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
-uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
-uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
-uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
-uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
-uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
-uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u16)))
-uint16x8_t viwdupq_u16(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u32)))
-uint32x4_t viwdupq_u32(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_n_u8)))
-uint8x16_t viwdupq_u8(uint32_t, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u16)))
-uint16x8_t viwdupq_u16(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u32)))
-uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_wb_u8)))
-uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
-uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
-uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
-uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
-uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
-uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
-uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t vld1q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
-int16x8_t vld1q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t vld1q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
-int32x4_t vld1q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t vld1q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
-int8x16_t vld1q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t vld1q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
-uint16x8_t vld1q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t vld1q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
-uint32x4_t vld1q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t vld1q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
-uint8x16_t vld1q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
-int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
-int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
-int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
-uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
-uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
-uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t vld2q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
-int16x8x2_t vld2q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t vld2q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
-int32x4x2_t vld2q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t vld2q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
-int8x16x2_t vld2q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t vld2q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
-uint16x8x2_t vld2q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t vld2q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
-uint32x4x2_t vld2q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t vld2q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
-uint8x16x2_t vld2q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t vld4q_s16(const int16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
-int16x8x4_t vld4q(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t vld4q_s32(const int32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
-int32x4x4_t vld4q(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t vld4q_s8(const int8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
-int8x16x4_t vld4q(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t vld4q_u16(const uint16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
-uint16x8x4_t vld4q(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t vld4q_u32(const uint32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
-uint32x4x4_t vld4q(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t vld4q_u8(const uint8_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
-uint8x16x4_t vld4q(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
-int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
-int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
-int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
-uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
-uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
-uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
-int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
-int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
-int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
-uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
-uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
-uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
-int16x8_t vldrbq_s16(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
-int32x4_t vldrbq_s32(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
-int8x16_t vldrbq_s8(const int8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
-uint16x8_t vldrbq_u16(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
-uint32x4_t vldrbq_u32(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
-uint8x16_t vldrbq_u8(const uint8_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
-int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
-int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
-int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
-uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
-uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
-uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
-int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
-uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
-int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
-uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
-int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
-uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
-int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
-uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
-int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
-uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
-int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
-uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
-int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
-uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
-int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
-uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
-int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
-int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
-uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
-uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
-int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
-int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
-uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
-uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
-int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
-int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
-uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
-uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
-int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
-int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
-uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
-int16x8_t vldrhq_s16(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
-int32x4_t vldrhq_s32(const int16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
-uint16x8_t vldrhq_u16(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
-uint32x4_t vldrhq_u32(const uint16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
-int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
-int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
-uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
-uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
-int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
-uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
-int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
-uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
-int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
-uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
-int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
-uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
-int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
-uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
-int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
-uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
-int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
-uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
-int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
-uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
-int32x4_t vldrwq_s32(const int32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
-uint32x4_t vldrwq_u32(const uint32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
-int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
-uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s16)))
-uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s32)))
-uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_m_s8)))
-uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s16)))
-uint16x8_t vmaxaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s32)))
-uint32x4_t vmaxaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxaq_s8)))
-uint8x16_t vmaxaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s16)))
-int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s32)))
-int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_s8)))
-int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u16)))
-uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u32)))
-uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_m_u8)))
-uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t vmaxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s16)))
-int16x8_t vmaxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t vmaxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s32)))
-int32x4_t vmaxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t vmaxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_s8)))
-int8x16_t vmaxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u16)))
-uint16x8_t vmaxq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u32)))
-uint32x4_t vmaxq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_u8)))
-uint8x16_t vmaxq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s16)))
-int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s32)))
-int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_s8)))
-int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u16)))
-uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u32)))
-uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxq_x_u8)))
-uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t vmaxvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
-int16_t vmaxvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t vmaxvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
-int32_t vmaxvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t vmaxvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
-int8_t vmaxvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t vmaxvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
-uint16_t vmaxvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t vmaxvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
-uint32_t vmaxvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t vmaxvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
-uint8_t vmaxvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s16)))
-uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s32)))
-uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_m_s8)))
-uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t vminaq_s16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s16)))
-uint16x8_t vminaq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t vminaq_s32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s32)))
-uint32x4_t vminaq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t vminaq_s8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminaq_s8)))
-uint8x16_t vminaq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s16)))
-int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s32)))
-int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_s8)))
-int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u16)))
-uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u32)))
-uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_m_u8)))
-uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t vminq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s16)))
-int16x8_t vminq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t vminq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s32)))
-int32x4_t vminq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t vminq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_s8)))
-int8x16_t vminq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t vminq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u16)))
-uint16x8_t vminq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t vminq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u32)))
-uint32x4_t vminq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t vminq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_u8)))
-uint8x16_t vminq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s16)))
-int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s32)))
-int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_s8)))
-int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u16)))
-uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u32)))
-uint32x4_t vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminq_x_u8)))
-uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t vminvq_s16(int16_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
-int16_t vminvq(int16_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t vminvq_s32(int32_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
-int32_t vminvq(int32_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t vminvq_s8(int8_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
-int8_t vminvq(int8_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t vminvq_u16(uint16_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
-uint16_t vminvq(uint16_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t vminvq_u32(uint32_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
-uint32_t vminvq(uint32_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t vminvq_u8(uint8_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
-uint8_t vminvq(uint8_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s16)))
-int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s32)))
-int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_s8)))
-int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u16)))
-uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u32)))
-uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_p_u8)))
-uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s16)))
-int32_t vmladavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s32)))
-int32_t vmladavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_s8)))
-int32_t vmladavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u16)))
-uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u32)))
-uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaq_u8)))
-uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
-int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
-int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
-int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s16)))
-int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s32)))
-int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavaxq_s8)))
-int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s16)))
-int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s32)))
-int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_s8)))
-int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u16)))
-uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u32)))
-uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_p_u8)))
-uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t vmladavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s16)))
-int32_t vmladavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t vmladavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s32)))
-int32_t vmladavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t vmladavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_s8)))
-int32_t vmladavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t vmladavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u16)))
-uint32_t vmladavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t vmladavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u32)))
-uint32_t vmladavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t vmladavq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavq_u8)))
-uint32_t vmladavq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s16)))
-int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s32)))
-int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_p_s8)))
-int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t vmladavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s16)))
-int32_t vmladavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t vmladavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s32)))
-int32_t vmladavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t vmladavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmladavxq_s8)))
-int32_t vmladavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
-int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
-int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
-uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
-uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s16)))
-int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_s32)))
-int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u16)))
-uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaq_u32)))
-uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
-int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
-int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
-int64_t vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
-int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
-int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
-int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
-uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
-uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t vmlaldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s16)))
-int64_t vmlaldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t vmlaldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_s32)))
-int64_t vmlaldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u16)))
-uint64_t vmlaldavq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavq_u32)))
-uint64_t vmlaldavq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
-int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
-int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t vmlaldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s16)))
-int64_t vmlaldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t vmlaldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlaldavxq_s32)))
-int64_t vmlaldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
-int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
-int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
-int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s16)))
-int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s32)))
-int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaq_s8)))
-int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
-int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
-int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
-int32_t vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
-int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
-int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
-int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
-int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
-int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
-int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t vmlsdavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s16)))
-int32_t vmlsdavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t vmlsdavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s32)))
-int32_t vmlsdavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t vmlsdavq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavq_s8)))
-int32_t vmlsdavq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
-int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
-int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
-int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t vmlsdavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s16)))
-int32_t vmlsdavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t vmlsdavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s32)))
-int32_t vmlsdavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t vmlsdavxq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsdavxq_s8)))
-int32_t vmlsdavxq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
-int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
-int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s16)))
-int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaq_s32)))
-int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
-int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
-int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
-int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
-int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
-int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
-int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t vmlsldavq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s16)))
-int64_t vmlsldavq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t vmlsldavq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavq_s32)))
-int64_t vmlsldavq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
-int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
-int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t vmlsldavxq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s16)))
-int64_t vmlsldavxq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t vmlsldavxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmlsldavxq_s32)))
-int64_t vmlsldavxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s16)))
-int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s32)))
-int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_s8)))
-int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u16)))
-uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u32)))
-uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_m_u8)))
-uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t vmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s16)))
-int16x8_t vmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t vmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s32)))
-int32x4_t vmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t vmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_s8)))
-int8x16_t vmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u16)))
-uint16x8_t vmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u32)))
-uint32x4_t vmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_u8)))
-uint8x16_t vmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s16)))
-int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s32)))
-int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_s8)))
-int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u16)))
-uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u32)))
-uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulhq_x_u8)))
-uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
-int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
-int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
-int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
-uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
-uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
-uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s16)))
-int32x4_t vmullbq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s32)))
-int64x2_t vmullbq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_s8)))
-int16x8_t vmullbq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u16)))
-uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u32)))
-uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_u8)))
-uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
-int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
-int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
-int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
-uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
-uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
-uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
-uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
-uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p16)))
-uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_p8)))
-uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
-uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
-uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
-int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
-int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
-int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
-uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
-uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
-uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s16)))
-int32x4_t vmulltq_int(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s32)))
-int64x2_t vmulltq_int(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_s8)))
-int16x8_t vmulltq_int(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u16)))
-uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u32)))
-uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_u8)))
-uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
-int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
-int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
-int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
-uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
-uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
-uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
-uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
-uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p16)))
-uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_p8)))
-uint16x8_t vmulltq_poly(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
-uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
-uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s16)))
-int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s32)))
-int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_s8)))
-int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u16)))
-uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u32)))
-uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_u8)))
-uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t vmulq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s16)))
-int16x8_t vmulq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t vmulq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s32)))
-int32x4_t vmulq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t vmulq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_s8)))
-int8x16_t vmulq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u16)))
-uint16x8_t vmulq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u32)))
-uint32x4_t vmulq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_u8)))
-uint8x16_t vmulq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s16)))
-int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s32)))
-int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_s8)))
-int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u16)))
-uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u32)))
-uint32x4_t vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_u8)))
-uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
-int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
-int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
-uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
-uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s16)))
-int16x8_t vmvnq_n_s16(int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_s32)))
-int32x4_t vmvnq_n_s32(int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u16)))
-uint16x8_t vmvnq_n_u16(uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_n_u32)))
-uint32x4_t vmvnq_n_u32(uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
-int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
-int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
-uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
-uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s16)))
-int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s32)))
-int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_s8)))
-int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u16)))
-uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u32)))
-uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_u8)))
-uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t vornq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s16)))
-int16x8_t vornq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t vornq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s32)))
-int32x4_t vornq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t vornq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_s8)))
-int8x16_t vornq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t vornq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u16)))
-uint16x8_t vornq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t vornq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u32)))
-uint32x4_t vornq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t vornq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_u8)))
-uint8x16_t vornq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s16)))
-int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s32)))
-int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_s8)))
-int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u16)))
-uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u32)))
-uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_u8)))
-uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s16)))
-int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_s32)))
-int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u16)))
-uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_n_u32)))
-uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s16)))
-int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s32)))
-int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_s8)))
-int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u16)))
-uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u32)))
-uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_u8)))
-uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t vorrq_n_s16(int16x8_t, int16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s16)))
-int16x8_t vorrq(int16x8_t, int16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t vorrq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_s32)))
-int32x4_t vorrq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u16)))
-uint16x8_t vorrq(uint16x8_t, uint16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_n_u32)))
-uint32x4_t vorrq(uint32x4_t, uint32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t vorrq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s16)))
-int16x8_t vorrq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t vorrq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s32)))
-int32x4_t vorrq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t vorrq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_s8)))
-int8x16_t vorrq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u16)))
-uint16x8_t vorrq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u32)))
-uint32x4_t vorrq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_u8)))
-uint8x16_t vorrq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s16)))
-int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s32)))
-int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_s8)))
-int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u16)))
-uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u32)))
-uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_u8)))
-uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpnot)))
-mve_pred16_t vpnot(mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s16)))
-int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s32)))
-int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s64)))
-int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_s8)))
-int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u16)))
-uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u32)))
-uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u64)))
-uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_u8)))
-uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s16)))
-int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s32)))
-int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_s8)))
-int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u16)))
-uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u32)))
-uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_m_u8)))
-uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t vqaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s16)))
-int16x8_t vqaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t vqaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s32)))
-int32x4_t vqaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t vqaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_s8)))
-int8x16_t vqaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u16)))
-uint16x8_t vqaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u32)))
-uint32x4_t vqaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqaddq_u8)))
-uint8x16_t vqaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
-int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
-int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
-int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s16)))
-int16x8_t vqdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s32)))
-int32x4_t vqdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqdmulhq_s8)))
-int8x16_t vqdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
-int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
-int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
-int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s16)))
-int16x8_t vqrdmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s32)))
-int32x4_t vqrdmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrdmulhq_s8)))
-int8x16_t vqrdmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
-int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
-int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
-int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
-uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
-uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
-uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s16)))
-int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s32)))
-int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_s8)))
-int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u16)))
-uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u32)))
-uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_m_u8)))
-uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t vqrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s16)))
-int16x8_t vqrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t vqrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s32)))
-int32x4_t vqrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t vqrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_s8)))
-int8x16_t vqrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u16)))
-uint16x8_t vqrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u32)))
-uint32x4_t vqrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_n_u8)))
-uint8x16_t vqrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t vqrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s16)))
-int16x8_t vqrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t vqrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s32)))
-int32x4_t vqrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t vqrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_s8)))
-int8x16_t vqrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u16)))
-uint16x8_t vqrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u32)))
-uint32x4_t vqrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshlq_u8)))
-uint8x16_t vqrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
-int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
-int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
-uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
-uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
-int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
-int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
-uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
-uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
-int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
-int16x8_t vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
-uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
-uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
-int8x16_t vqrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
-int16x8_t vqrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
-uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
-uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
-uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
-uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
-uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
-uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
-uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
-uint16x8_t vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
-uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
-uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
-int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
-int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
-int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
-uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
-uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
-uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
-int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
-int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
-int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
-uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
-uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
-uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s16)))
-int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s32)))
-int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_s8)))
-int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u16)))
-uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u32)))
-uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_m_u8)))
-uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t vqshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s16)))
-int16x8_t vqshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t vqshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s32)))
-int32x4_t vqshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t vqshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_s8)))
-int8x16_t vqshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t vqshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u16)))
-uint16x8_t vqshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t vqshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u32)))
-uint32x4_t vqshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t vqshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_n_u8)))
-uint8x16_t vqshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t vqshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s16)))
-int16x8_t vqshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t vqshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s32)))
-int32x4_t vqshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t vqshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_s8)))
-int8x16_t vqshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u16)))
-uint16x8_t vqshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u32)))
-uint32x4_t vqshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_r_u8)))
-uint8x16_t vqshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t vqshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s16)))
-int16x8_t vqshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t vqshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s32)))
-int32x4_t vqshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t vqshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_s8)))
-int8x16_t vqshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u16)))
-uint16x8_t vqshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u32)))
-uint32x4_t vqshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshlq_u8)))
-uint8x16_t vqshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
-uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
-uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
-uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t vqshluq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s16)))
-uint16x8_t vqshluq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t vqshluq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s32)))
-uint32x4_t vqshluq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t vqshluq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshluq_n_s8)))
-uint8x16_t vqshluq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
-int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
-int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
-uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
-uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
-int8x16_t vqshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
-int16x8_t vqshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
-uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
-uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
-int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
-int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
-uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
-uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s16)))
-int8x16_t vqshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_s32)))
-int16x8_t vqshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u16)))
-uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrntq_n_u32)))
-uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
-uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
-uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
-uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
-uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
-uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
-uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s16)))
-uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqshruntq_n_s32)))
-uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s16)))
-int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s32)))
-int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_s8)))
-int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u16)))
-uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u32)))
-uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_m_u8)))
-uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t vqsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s16)))
-int16x8_t vqsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t vqsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s32)))
-int32x4_t vqsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t vqsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_s8)))
-int8x16_t vqsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u16)))
-uint16x8_t vqsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u32)))
-uint32x4_t vqsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vqsubq_u8)))
-uint8x16_t vqsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t vreinterpretq_s16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
-int16x8_t vreinterpretq_s16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t vreinterpretq_s16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
-int16x8_t vreinterpretq_s16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t vreinterpretq_s16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
-int16x8_t vreinterpretq_s16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t vreinterpretq_s16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
-int16x8_t vreinterpretq_s16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t vreinterpretq_s16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
-int16x8_t vreinterpretq_s16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t vreinterpretq_s16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
-int16x8_t vreinterpretq_s16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t vreinterpretq_s16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
-int16x8_t vreinterpretq_s16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t vreinterpretq_s32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
-int32x4_t vreinterpretq_s32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t vreinterpretq_s32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
-int32x4_t vreinterpretq_s32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t vreinterpretq_s32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
-int32x4_t vreinterpretq_s32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t vreinterpretq_s32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
-int32x4_t vreinterpretq_s32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t vreinterpretq_s32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
-int32x4_t vreinterpretq_s32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t vreinterpretq_s32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
-int32x4_t vreinterpretq_s32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t vreinterpretq_s32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
-int32x4_t vreinterpretq_s32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t vreinterpretq_s64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
-int64x2_t vreinterpretq_s64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t vreinterpretq_s64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
-int64x2_t vreinterpretq_s64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t vreinterpretq_s64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
-int64x2_t vreinterpretq_s64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t vreinterpretq_s64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
-int64x2_t vreinterpretq_s64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t vreinterpretq_s64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
-int64x2_t vreinterpretq_s64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t vreinterpretq_s64_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
-int64x2_t vreinterpretq_s64(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t vreinterpretq_s64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
-int64x2_t vreinterpretq_s64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t vreinterpretq_s8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
-int8x16_t vreinterpretq_s8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t vreinterpretq_s8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
-int8x16_t vreinterpretq_s8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t vreinterpretq_s8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
-int8x16_t vreinterpretq_s8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t vreinterpretq_s8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
-int8x16_t vreinterpretq_s8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t vreinterpretq_s8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
-int8x16_t vreinterpretq_s8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t vreinterpretq_s8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
-int8x16_t vreinterpretq_s8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t vreinterpretq_s8_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
-int8x16_t vreinterpretq_s8(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t vreinterpretq_u16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
-uint16x8_t vreinterpretq_u16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t vreinterpretq_u16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
-uint16x8_t vreinterpretq_u16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t vreinterpretq_u16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
-uint16x8_t vreinterpretq_u16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t vreinterpretq_u16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
-uint16x8_t vreinterpretq_u16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t vreinterpretq_u16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
-uint16x8_t vreinterpretq_u16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t vreinterpretq_u16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
-uint16x8_t vreinterpretq_u16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t vreinterpretq_u16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
-uint16x8_t vreinterpretq_u16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t vreinterpretq_u32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
-uint32x4_t vreinterpretq_u32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t vreinterpretq_u32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
-uint32x4_t vreinterpretq_u32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t vreinterpretq_u32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
-uint32x4_t vreinterpretq_u32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t vreinterpretq_u32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
-uint32x4_t vreinterpretq_u32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t vreinterpretq_u32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
-uint32x4_t vreinterpretq_u32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t vreinterpretq_u32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
-uint32x4_t vreinterpretq_u32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t vreinterpretq_u32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
-uint32x4_t vreinterpretq_u32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t vreinterpretq_u64_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
-uint64x2_t vreinterpretq_u64(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t vreinterpretq_u64_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
-uint64x2_t vreinterpretq_u64(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t vreinterpretq_u64_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
-uint64x2_t vreinterpretq_u64(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t vreinterpretq_u64_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
-uint64x2_t vreinterpretq_u64(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t vreinterpretq_u64_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
-uint64x2_t vreinterpretq_u64(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t vreinterpretq_u64_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
-uint64x2_t vreinterpretq_u64(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t vreinterpretq_u64_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
-uint64x2_t vreinterpretq_u64(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t vreinterpretq_u8_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
-uint8x16_t vreinterpretq_u8(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t vreinterpretq_u8_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
-uint8x16_t vreinterpretq_u8(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t vreinterpretq_u8_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
-uint8x16_t vreinterpretq_u8(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t vreinterpretq_u8_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
-uint8x16_t vreinterpretq_u8(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t vreinterpretq_u8_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
-uint8x16_t vreinterpretq_u8(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t vreinterpretq_u8_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
-uint8x16_t vreinterpretq_u8(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t vreinterpretq_u8_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
-uint8x16_t vreinterpretq_u8(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s16)))
-int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s32)))
-int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_s8)))
-int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u16)))
-uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u32)))
-uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_m_u8)))
-uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t vrhaddq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s16)))
-int16x8_t vrhaddq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t vrhaddq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s32)))
-int32x4_t vrhaddq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t vrhaddq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_s8)))
-int8x16_t vrhaddq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u16)))
-uint16x8_t vrhaddq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u32)))
-uint32x4_t vrhaddq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_u8)))
-uint8x16_t vrhaddq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s16)))
-int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s32)))
-int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_s8)))
-int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u16)))
-uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u32)))
-uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrhaddq_x_u8)))
-uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
-int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
-uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
-int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
-uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
-int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
-int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
-int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
-uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
-int64_t vrmlaldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
-uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
-int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
-int64_t vrmlaldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
-int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
-int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
-int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
-int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
-int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
-int64_t vrmlsldavhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
-int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
-int64_t vrmlsldavhxq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s16)))
-int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s32)))
-int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_s8)))
-int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u16)))
-uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u32)))
-uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_m_u8)))
-uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t vrmulhq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s16)))
-int16x8_t vrmulhq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t vrmulhq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s32)))
-int32x4_t vrmulhq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t vrmulhq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_s8)))
-int8x16_t vrmulhq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u16)))
-uint16x8_t vrmulhq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u32)))
-uint32x4_t vrmulhq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_u8)))
-uint8x16_t vrmulhq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s16)))
-int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s32)))
-int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_s8)))
-int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u16)))
-uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u32)))
-uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrmulhq_x_u8)))
-uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
-int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
-int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
-int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
-uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
-uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
-uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s16)))
-int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s32)))
-int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_s8)))
-int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u16)))
-uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u32)))
-uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_m_u8)))
-uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t vrshlq_n_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s16)))
-int16x8_t vrshlq(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t vrshlq_n_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s32)))
-int32x4_t vrshlq(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t vrshlq_n_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_s8)))
-int8x16_t vrshlq(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u16)))
-uint16x8_t vrshlq(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u32)))
-uint32x4_t vrshlq(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_n_u8)))
-uint8x16_t vrshlq(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t vrshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s16)))
-int16x8_t vrshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t vrshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s32)))
-int32x4_t vrshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t vrshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_s8)))
-int8x16_t vrshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u16)))
-uint16x8_t vrshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u32)))
-uint32x4_t vrshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_u8)))
-uint8x16_t vrshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s16)))
-int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s32)))
-int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_s8)))
-int8x16_t vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u16)))
-uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u32)))
-uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshlq_x_u8)))
-uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
-int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
-int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
-uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
-uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
-int8x16_t vrshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
-int16x8_t vrshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
-uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
-uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
-int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
-int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
-uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
-uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s16)))
-int8x16_t vrshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_s32)))
-int16x8_t vrshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u16)))
-uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrntq_n_u32)))
-uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
-int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
-int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
-int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
-uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
-uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
-uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t vrshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s16)))
-int16x8_t vrshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t vrshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s32)))
-int32x4_t vrshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t vrshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_s8)))
-int8x16_t vrshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t vrshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u16)))
-uint16x8_t vrshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t vrshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u32)))
-uint32x4_t vrshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t vrshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_n_u8)))
-uint8x16_t vrshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
-int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
-int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
-int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
-uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
-uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
-uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
-int16x8_t vsetq_lane(int16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
-int32x4_t vsetq_lane(int32_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
-int64x2_t vsetq_lane(int64_t, int64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
-int8x16_t vsetq_lane(int8_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
-uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
-uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
-uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
-uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
-int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
-int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
-uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
-uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t vshllbq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s16)))
-int32x4_t vshllbq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t vshllbq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_s8)))
-int16x8_t vshllbq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t vshllbq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u16)))
-uint32x4_t vshllbq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t vshllbq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_n_u8)))
-uint16x8_t vshllbq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
-int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
-int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
-uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
-uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
-int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
-int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
-uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
-uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t vshlltq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s16)))
-int32x4_t vshlltq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t vshlltq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_s8)))
-int16x8_t vshlltq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t vshlltq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u16)))
-uint32x4_t vshlltq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t vshlltq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_n_u8)))
-uint16x8_t vshlltq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
-int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
-int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
-uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
-uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s16)))
-int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s32)))
-int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_s8)))
-int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u16)))
-uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u32)))
-uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_n_u8)))
-uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s16)))
-int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s32)))
-int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_s8)))
-int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u16)))
-uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u32)))
-uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_r_u8)))
-uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s16)))
-int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s32)))
-int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_s8)))
-int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u16)))
-uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u32)))
-uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_m_u8)))
-uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t vshlq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s16)))
-int16x8_t vshlq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t vshlq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s32)))
-int32x4_t vshlq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t vshlq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_s8)))
-int8x16_t vshlq_n(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t vshlq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u16)))
-uint16x8_t vshlq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t vshlq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u32)))
-uint32x4_t vshlq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t vshlq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_n_u8)))
-uint8x16_t vshlq_n(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t vshlq_r_s16(int16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s16)))
-int16x8_t vshlq_r(int16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t vshlq_r_s32(int32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s32)))
-int32x4_t vshlq_r(int32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t vshlq_r_s8(int8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_s8)))
-int8x16_t vshlq_r(int8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t vshlq_r_u16(uint16x8_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u16)))
-uint16x8_t vshlq_r(uint16x8_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t vshlq_r_u32(uint32x4_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u32)))
-uint32x4_t vshlq_r(uint32x4_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t vshlq_r_u8(uint8x16_t, int32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_r_u8)))
-uint8x16_t vshlq_r(uint8x16_t, int32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t vshlq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s16)))
-int16x8_t vshlq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t vshlq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s32)))
-int32x4_t vshlq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t vshlq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_s8)))
-int8x16_t vshlq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t vshlq_u16(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u16)))
-uint16x8_t vshlq(uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t vshlq_u32(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u32)))
-uint32x4_t vshlq(uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t vshlq_u8(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_u8)))
-uint8x16_t vshlq(uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s16)))
-int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s32)))
-int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_s8)))
-int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u16)))
-uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u32)))
-uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_n_u8)))
-uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s16)))
-int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s32)))
-int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_s8)))
-int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u16)))
-uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u32)))
-uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshlq_x_u8)))
-uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
-int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
-int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
-uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
-uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s16)))
-int8x16_t vshrnbq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_s32)))
-int16x8_t vshrnbq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u16)))
-uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrnbq_n_u32)))
-uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
-int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
-int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
-uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
-uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s16)))
-int8x16_t vshrntq(int8x16_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_s32)))
-int16x8_t vshrntq(int16x8_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u16)))
-uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrntq_n_u32)))
-uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s16)))
-int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s32)))
-int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_s8)))
-int8x16_t vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u16)))
-uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u32)))
-uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_m_n_u8)))
-uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t vshrq_n_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s16)))
-int16x8_t vshrq(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t vshrq_n_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s32)))
-int32x4_t vshrq(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t vshrq_n_s8(int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_s8)))
-int8x16_t vshrq(int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t vshrq_n_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u16)))
-uint16x8_t vshrq(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t vshrq_n_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u32)))
-uint32x4_t vshrq(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t vshrq_n_u8(uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_n_u8)))
-uint8x16_t vshrq(uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s16)))
-int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s32)))
-int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_s8)))
-int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u16)))
-uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u32)))
-uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vshrq_x_n_u8)))
-uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s16)))
-int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s32)))
-int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_s8)))
-int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u16)))
-uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u32)))
-uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_m_n_u8)))
-uint8x16_t vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s16)))
-int16x8_t vsliq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s32)))
-int32x4_t vsliq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_s8)))
-int8x16_t vsliq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t vsliq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u16)))
-uint16x8_t vsliq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u32)))
-uint32x4_t vsliq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsliq_n_u8)))
-uint8x16_t vsliq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s16)))
-int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s32)))
-int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_s8)))
-int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u16)))
-uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u32)))
-uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_m_n_u8)))
-uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s16)))
-int16x8_t vsriq(int16x8_t, int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s32)))
-int32x4_t vsriq(int32x4_t, int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_s8)))
-int8x16_t vsriq(int8x16_t, int8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t vsriq_n_u16(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u16)))
-uint16x8_t vsriq(uint16x8_t, uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u32)))
-uint32x4_t vsriq(uint32x4_t, uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsriq_n_u8)))
-uint8x16_t vsriq(uint8x16_t, uint8x16_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
-void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
-void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
-void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
-void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
-void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
-void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void vst1q_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
-void vst1q(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void vst1q_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
-void vst1q(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void vst1q_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
-void vst1q(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void vst1q_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
-void vst1q(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void vst1q_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
-void vst1q(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void vst1q_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
-void vst1q(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void vst2q_s16(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
-void vst2q(int16_t *, int16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void vst2q_s32(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
-void vst2q(int32_t *, int32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void vst2q_s8(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
-void vst2q(int8_t *, int8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void vst2q_u16(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
-void vst2q(uint16_t *, uint16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void vst2q_u32(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
-void vst2q(uint32_t *, uint32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void vst2q_u8(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
-void vst2q(uint8_t *, uint8x16x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void vst4q_s16(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
-void vst4q(int16_t *, int16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void vst4q_s32(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
-void vst4q(int32_t *, int32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void vst4q_s8(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
-void vst4q(int8_t *, int8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void vst4q_u16(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
-void vst4q(uint16_t *, uint16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void vst4q_u32(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
-void vst4q(uint32_t *, uint32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void vst4q_u8(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
-void vst4q(uint8_t *, uint8x16x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
-void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
-void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
-void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
-void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
-void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
-void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void vstrbq_s16(int8_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
-void vstrbq(int8_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void vstrbq_s32(int8_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
-void vstrbq(int8_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void vstrbq_s8(int8_t *, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
-void vstrbq(int8_t *, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
-void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
-void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
-void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
-void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
-void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
-void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
-void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
-void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
-void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
-void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
-void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
-void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void vstrbq_u16(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
-void vstrbq(uint8_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void vstrbq_u32(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
-void vstrbq(uint8_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void vstrbq_u8(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
-void vstrbq(uint8_t *, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
-void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
-void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
-void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
-void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
-void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
-void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
-void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
-void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
-void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
-void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
-void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
-void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
-void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
-void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
-void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
-void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
-void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
-void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
-void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
-void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void vstrhq_s16(int16_t *, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
-void vstrhq(int16_t *, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void vstrhq_s32(int16_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
-void vstrhq(int16_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
-void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
-void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
-void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
-void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
-void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
-void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
-void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
-void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
-void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
-void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
-void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
-void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
-void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
-void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
-void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
-void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void vstrhq_u16(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
-void vstrhq(uint16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void vstrhq_u32(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
-void vstrhq(uint16_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
-void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
-void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void vstrwq_s32(int32_t *, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
-void vstrwq(int32_t *, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
-void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
-void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
-void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
-void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
-void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
-void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
-void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
-void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
-void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
-void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
-void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
-void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
-void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
-void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
-void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
-void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void vstrwq_u32(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
-void vstrwq(uint32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
-int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
-int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
-int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
-uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
-uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
-uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t vsubq_s16(int16x8_t, int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
-int16x8_t vsubq(int16x8_t, int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t vsubq_s32(int32x4_t, int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
-int32x4_t vsubq(int32x4_t, int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t vsubq_s8(int8x16_t, int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
-int8x16_t vsubq(int8x16_t, int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
-uint16x8_t vsubq(uint16x8_t, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
-uint32x4_t vsubq(uint32x4_t, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
-uint8x16_t vsubq(uint8x16_t, uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s16)))
-int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s32)))
-int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_s8)))
-int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u16)))
-uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u32)))
-uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_u8)))
-uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
-int16x8_t vuninitializedq(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
-int32x4_t vuninitializedq(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
-int64x2_t vuninitializedq(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
-int8x16_t vuninitializedq(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
-uint16x8_t vuninitializedq(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
-uint32x4_t vuninitializedq(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
-uint64x2_t vuninitializedq(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
-uint8x16_t vuninitializedq(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
-int16x8_t vuninitializedq_s16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
-int32x4_t vuninitializedq_s32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
-int64x2_t vuninitializedq_s64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
-int8x16_t vuninitializedq_s8();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
-uint16x8_t vuninitializedq_u16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
-uint32x4_t vuninitializedq_u32();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
-uint64x2_t vuninitializedq_u64();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
-uint8x16_t vuninitializedq_u8();
-
-#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
-
-#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
-
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t vabdq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f16)))
-float16x8_t vabdq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t vabdq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_f32)))
-float32x4_t vabdq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f16)))
-float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_m_f32)))
-float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f16)))
-float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vabdq_x_f32)))
-float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t vaddq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
-float16x8_t vaddq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t vaddq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
-float32x4_t vaddq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
-float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
-float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f16)))
-float16x8_t vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_x_f32)))
-float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t vandq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f16)))
-float16x8_t vandq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t vandq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_f32)))
-float32x4_t vandq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f16)))
-float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_m_f32)))
-float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f16)))
-float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vandq_x_f32)))
-float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t vbicq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f16)))
-float16x8_t vbicq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t vbicq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_f32)))
-float32x4_t vbicq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f16)))
-float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_m_f32)))
-float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f16)))
-float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vbicq_x_f32)))
-float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t vcaddq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
-float16x8_t vcaddq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
-float32x4_t vcaddq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
-float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
-float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
-float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
-float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
-float16x8_t vcaddq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
-float32x4_t vcaddq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
-float16x8_t vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
-float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
-float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
-float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f16)))
-float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_f32)))
-float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f16)))
-float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_m_f32)))
-float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
-float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
-float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
-float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
-float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
-float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
-float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
-float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
-float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
-float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
-float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
-float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
-float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
-mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
-mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
-mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
-mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
-mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
-mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
-mve_pred16_t vcmpeqq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
-mve_pred16_t vcmpeqq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
-mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
-mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
-mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
-mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
-mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
-mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
-mve_pred16_t vcmpgeq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
-mve_pred16_t vcmpgeq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
-mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
-mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
-mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
-mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
-mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
-mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
-mve_pred16_t vcmpgtq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
-mve_pred16_t vcmpgtq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
-mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
-mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
-mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
-mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
-mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
-mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
-mve_pred16_t vcmpleq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
-mve_pred16_t vcmpleq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
-mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
-mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
-mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
-mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
-mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
-mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
-mve_pred16_t vcmpltq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
-mve_pred16_t vcmpltq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
-mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
-mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
-mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
-mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
-mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
-mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
-mve_pred16_t vcmpneq(float16x8_t, float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
-mve_pred16_t vcmpneq(float32x4_t, float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t vcmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f16)))
-float16x8_t vcmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t vcmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_f32)))
-float32x4_t vcmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f16)))
-float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_m_f32)))
-float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
-float16x8_t vcmulq_rot180(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
-float32x4_t vcmulq_rot180(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
-float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
-float32x4_t vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
-float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
-float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
-float16x8_t vcmulq_rot270(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
-float32x4_t vcmulq_rot270(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
-float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
-float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
-float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
-float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
-float16x8_t vcmulq_rot90(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t vcmulq_rot90_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
-float32x4_t vcmulq_rot90(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
-float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
-float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
-float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
-float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f16)))
-float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmulq_x_f32)))
-float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
-float16x8_t vcreateq_f16(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
-float32x4_t vcreateq_f32(uint64_t, uint64_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
-float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
-float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
-float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
-float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
-float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
-float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
-int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
-int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
-uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
-uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t vcvtq_n_f16_s16(int16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
-float16x8_t vcvtq_n(int16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t vcvtq_n_f16_u16(uint16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
-float16x8_t vcvtq_n(uint16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t vcvtq_n_f32_s32(int32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
-float32x4_t vcvtq_n(int32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t vcvtq_n_f32_u32(uint32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
-float32x4_t vcvtq_n(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
-int16x8_t vcvtq_n_s16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
-int32x4_t vcvtq_n_s32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
-uint16x8_t vcvtq_n_u16_f16(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
-uint32x4_t vcvtq_n_u32_f32(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
-float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
-float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
-float32x4_t vcvtq_x_n(int32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
-float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
-int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
-int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
-uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
-uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
-float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
-float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f16)))
-float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vdupq_m_n_f32)))
-float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f16)))
-float16x8_t vdupq_n_f16(float16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_n_f32)))
-float32x4_t vdupq_n_f32(float32_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f16)))
-float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vdupq_x_n_f32)))
-float32x4_t vdupq_x_n_f32(float32_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t veorq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f16)))
-float16x8_t veorq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t veorq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_f32)))
-float32x4_t veorq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f16)))
-float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_m_f32)))
-float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f16)))
-float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_veorq_x_f32)))
-float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t vgetq_lane_f16(float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
-float16_t vgetq_lane(float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t vgetq_lane_f32(float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
-float32_t vgetq_lane(float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t vld1q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
-float16x8_t vld1q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t vld1q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
-float32x4_t vld1q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
-float16x8_t vld1q_z(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
-float32x4_t vld1q_z(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t vld2q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
-float16x8x2_t vld2q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t vld2q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
-float32x4x2_t vld2q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t vld4q_f16(const float16_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
-float16x8x4_t vld4q(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t vld4q_f32(const float32_t *);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
-float32x4x4_t vld4q(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
-float16x8_t vldrhq_f16(const float16_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
-float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
-float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
-float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
-float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
-float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
-float32x4_t vldrwq_f32(const float32_t *);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
-float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
-float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
-float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
-float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
-float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
-float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
-float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
-float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
-float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f16)))
-float16x8_t vmaxnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_f32)))
-float32x4_t vmaxnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
-float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
-float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f16)))
-float16x8_t vmaxnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_f32)))
-float32x4_t vmaxnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
-float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
-float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
-float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
-float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t vminnmaq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f16)))
-float16x8_t vminnmaq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t vminnmaq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_f32)))
-float32x4_t vminnmaq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f16)))
-float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmaq_m_f32)))
-float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t vminnmq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f16)))
-float16x8_t vminnmq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t vminnmq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_f32)))
-float32x4_t vminnmq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f16)))
-float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_m_f32)))
-float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f16)))
-float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminnmq_x_f32)))
-float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t vmulq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f16)))
-float16x8_t vmulq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t vmulq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_f32)))
-float32x4_t vmulq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f16)))
-float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_m_f32)))
-float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f16)))
-float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmulq_x_f32)))
-float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t vornq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f16)))
-float16x8_t vornq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t vornq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_f32)))
-float32x4_t vornq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f16)))
-float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_m_f32)))
-float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f16)))
-float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vornq_x_f32)))
-float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t vorrq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f16)))
-float16x8_t vorrq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t vorrq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_f32)))
-float32x4_t vorrq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f16)))
-float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_m_f32)))
-float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f16)))
-float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vorrq_x_f32)))
-float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f16)))
-float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vpselq_f32)))
-float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t vreinterpretq_f16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
-float16x8_t vreinterpretq_f16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t vreinterpretq_f16_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
-float16x8_t vreinterpretq_f16(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t vreinterpretq_f16_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
-float16x8_t vreinterpretq_f16(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t vreinterpretq_f16_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
-float16x8_t vreinterpretq_f16(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t vreinterpretq_f16_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
-float16x8_t vreinterpretq_f16(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t vreinterpretq_f16_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
-float16x8_t vreinterpretq_f16(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t vreinterpretq_f16_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
-float16x8_t vreinterpretq_f16(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t vreinterpretq_f16_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
-float16x8_t vreinterpretq_f16(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t vreinterpretq_f16_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
-float16x8_t vreinterpretq_f16(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t vreinterpretq_f32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
-float32x4_t vreinterpretq_f32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t vreinterpretq_f32_s16(int16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
-float32x4_t vreinterpretq_f32(int16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t vreinterpretq_f32_s32(int32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
-float32x4_t vreinterpretq_f32(int32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t vreinterpretq_f32_s64(int64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
-float32x4_t vreinterpretq_f32(int64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t vreinterpretq_f32_s8(int8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
-float32x4_t vreinterpretq_f32(int8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t vreinterpretq_f32_u16(uint16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
-float32x4_t vreinterpretq_f32(uint16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t vreinterpretq_f32_u32(uint32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
-float32x4_t vreinterpretq_f32(uint32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t vreinterpretq_f32_u64(uint64x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
-float32x4_t vreinterpretq_f32(uint64x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t vreinterpretq_f32_u8(uint8x16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
-float32x4_t vreinterpretq_f32(uint8x16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t vreinterpretq_s16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
-int16x8_t vreinterpretq_s16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t vreinterpretq_s16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
-int16x8_t vreinterpretq_s16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t vreinterpretq_s32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
-int32x4_t vreinterpretq_s32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t vreinterpretq_s32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
-int32x4_t vreinterpretq_s32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t vreinterpretq_s64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
-int64x2_t vreinterpretq_s64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t vreinterpretq_s64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
-int64x2_t vreinterpretq_s64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t vreinterpretq_s8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
-int8x16_t vreinterpretq_s8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t vreinterpretq_s8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
-int8x16_t vreinterpretq_s8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t vreinterpretq_u16_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
-uint16x8_t vreinterpretq_u16(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t vreinterpretq_u16_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
-uint16x8_t vreinterpretq_u16(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t vreinterpretq_u32_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
-uint32x4_t vreinterpretq_u32(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t vreinterpretq_u32_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
-uint32x4_t vreinterpretq_u32(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t vreinterpretq_u64_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
-uint64x2_t vreinterpretq_u64(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t vreinterpretq_u64_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
-uint64x2_t vreinterpretq_u64(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t vreinterpretq_u8_f16(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
-uint8x16_t vreinterpretq_u8(float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t vreinterpretq_u8_f32(float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
-uint8x16_t vreinterpretq_u8(float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
-float16x8_t vsetq_lane(float16_t, float16x8_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
-float32x4_t vsetq_lane(float32_t, float32x4_t, int);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void vst1q_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
-void vst1q(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void vst1q_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
-void vst1q(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
-void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
-void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void vst2q_f16(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
-void vst2q(float16_t *, float16x8x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void vst2q_f32(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
-void vst2q(float32_t *, float32x4x2_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void vst4q_f16(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
-void vst4q(float16_t *, float16x8x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void vst4q_f32(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
-void vst4q(float32_t *, float32x4x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void vstrhq_f16(float16_t *, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
-void vstrhq(float16_t *, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
-void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
-void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
-void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
-void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
-void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void vstrwq_f32(float32_t *, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
-void vstrwq(float32_t *, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
-void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
-void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
-void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
-void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
-void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
-void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
-void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
-void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
-void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t vsubq_f16(float16x8_t, float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
-float16x8_t vsubq(float16x8_t, float16x8_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t vsubq_f32(float32x4_t, float32x4_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
-float32x4_t vsubq(float32x4_t, float32x4_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
-float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
-float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f16)))
-float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_x_f32)))
-float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
-float16x8_t vuninitializedq_f16();
-static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
-float32x4_t vuninitializedq_f32();
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
-float16x8_t vuninitializedq(float16x8_t);
-static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
-float32x4_t vuninitializedq(float32x4_t);
-
-#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* __ARM_MVE_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/arm_neon.h b/linux-x86/lib64/clang/11.0.1/include/arm_neon.h
deleted file mode 100644
index a8ab92f..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/arm_neon.h
+++ /dev/null
@@ -1,63222 +0,0 @@
-/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_NEON_H
-#define __ARM_NEON_H
-
-#if !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#endif
-
-#include <stdint.h>
-
-typedef float float32_t;
-typedef __fp16 float16_t;
-#ifdef __aarch64__
-typedef double float64_t;
-#endif
-
-#ifdef __aarch64__
-typedef uint8_t poly8_t;
-typedef uint16_t poly16_t;
-typedef uint64_t poly64_t;
-typedef __uint128_t poly128_t;
-#else
-typedef int8_t poly8_t;
-typedef int16_t poly16_t;
-#endif
-typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
-typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
-typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
-typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
-typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
-typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
-typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
-typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
-typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
-typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
-typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
-typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
-typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
-typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
-typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
-typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
-typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
-typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
-typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
-typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
-typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
-#endif
-typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
-typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
-typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
-typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
-typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-#endif
-
-typedef struct int8x8x2_t {
-  int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t {
-  int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t {
-  int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t {
-  int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t {
-  int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t {
-  int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t {
-  int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t {
-  int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t {
-  uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t {
-  uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t {
-  uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t {
-  uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t {
-  uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t {
-  uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t {
-  uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t {
-  uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float16x4x2_t {
-  float16x4_t val[2];
-} float16x4x2_t;
-
-typedef struct float16x8x2_t {
-  float16x8_t val[2];
-} float16x8x2_t;
-
-typedef struct float32x2x2_t {
-  float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t {
-  float32x4_t val[2];
-} float32x4x2_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x2_t {
-  float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct float64x2x2_t {
-  float64x2_t val[2];
-} float64x2x2_t;
-
-#endif
-typedef struct poly8x8x2_t {
-  poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t {
-  poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t {
-  poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t {
-  poly16x8_t val[2];
-} poly16x8x2_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x2_t {
-  poly64x1_t val[2];
-} poly64x1x2_t;
-
-typedef struct poly64x2x2_t {
-  poly64x2_t val[2];
-} poly64x2x2_t;
-
-#endif
-typedef struct int8x8x3_t {
-  int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t {
-  int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t {
-  int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t {
-  int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t {
-  int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t {
-  int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t {
-  int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t {
-  int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t {
-  uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t {
-  uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t {
-  uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t {
-  uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t {
-  uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t {
-  uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t {
-  uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t {
-  uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float16x4x3_t {
-  float16x4_t val[3];
-} float16x4x3_t;
-
-typedef struct float16x8x3_t {
-  float16x8_t val[3];
-} float16x8x3_t;
-
-typedef struct float32x2x3_t {
-  float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t {
-  float32x4_t val[3];
-} float32x4x3_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x3_t {
-  float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct float64x2x3_t {
-  float64x2_t val[3];
-} float64x2x3_t;
-
-#endif
-typedef struct poly8x8x3_t {
-  poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t {
-  poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t {
-  poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t {
-  poly16x8_t val[3];
-} poly16x8x3_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x3_t {
-  poly64x1_t val[3];
-} poly64x1x3_t;
-
-typedef struct poly64x2x3_t {
-  poly64x2_t val[3];
-} poly64x2x3_t;
-
-#endif
-typedef struct int8x8x4_t {
-  int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t {
-  int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t {
-  int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t {
-  int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t {
-  int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t {
-  int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t {
-  int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t {
-  int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t {
-  uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t {
-  uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t {
-  uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t {
-  uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t {
-  uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t {
-  uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t {
-  uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t {
-  uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float16x4x4_t {
-  float16x4_t val[4];
-} float16x4x4_t;
-
-typedef struct float16x8x4_t {
-  float16x8_t val[4];
-} float16x8x4_t;
-
-typedef struct float32x2x4_t {
-  float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t {
-  float32x4_t val[4];
-} float32x4x4_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x4_t {
-  float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct float64x2x4_t {
-  float64x2_t val[4];
-} float64x2x4_t;
-
-#endif
-typedef struct poly8x8x4_t {
-  poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t {
-  poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t {
-  poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t {
-  poly16x8_t val[4];
-} poly16x8x4_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x4_t {
-  poly64x1_t val[4];
-} poly64x1x4_t;
-
-typedef struct poly64x2x4_t {
-  poly64x2_t val[4];
-} poly64x2x4_t;
-
-#endif
-
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
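-/* vcle*: lane-wise "less than or equal" compares. Each result lane is all-ones
- * when the compare holds and zero otherwise. As throughout this header, the
- * big-endian variants reverse the lanes, operate, and reverse the result so
- * lane numbering matches the little-endian layout the builtins assume. */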
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
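-/* vcls*: per-lane "count leading sign bits" (the number of bits after the sign
- * bit that equal it), lowered to the generic __builtin_neon_vclsq_v/vcls_v
- * builtins; the trailing integer argument encodes the element type. */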
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
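-/* vclt*: lane-wise "less than" compares, with the same all-ones/zero mask
- * convention and endian-swap pattern as vcgt/vcle above. */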
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
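-/* vclz*: per-lane count of leading zero bits, via __builtin_neon_vclzq_v and
- * __builtin_neon_vclz_v. */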
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
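-/* vcnt*: per-lane population count (number of set bits) over 8-bit lanes,
- * including the poly8 variants. */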
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
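-/* vcombine_*: concatenate two 64-bit vectors into one 128-bit vector, first
- * operand in the low lanes. The __noswap_ variants in the big-endian branches
- * skip the lane reversals; they exist for internal use by intrinsics whose
- * operands have already been reversed. Illustrative use:
- *   uint8x8_t lo = vdup_n_u8(1), hi = vdup_n_u8(2);
- *   uint8x16_t v = vcombine_u8(lo, hi);  // lanes 0-7 from lo, 8-15 from hi
- */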
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
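-/* vcreate_*: reinterpret a uint64_t bit pattern as a 64-bit vector; the
- * __promote temporary forces the argument to uint64_t before the cast. On a
- * little-endian target, vcreate_u8(0x0706050403020100ULL) gives lane i = i. */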
-#define vcreate_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int16x4_t)(__promote); \
-  __ret; \
-})
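-/* vcvt*_f32_*: per-lane conversion from 32-bit (un)signed integers to
- * float32. */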
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
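-/* vcvt*_n_*: fixed-point conversions. These are macros because the fracbits
- * argument __p1 must be a compile-time constant; e.g. vcvt_n_f32_s32(v, 8)
- * divides each lane by 2^8 while converting to float (Q24.8 -> float32). */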
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
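-/* vdup_lane / vdupq_lane: broadcast one lane of the source vector to every
- * lane of the result; the lane index __p1 must be a compile-time constant.
- * E.g. vdupq_lane_u32(v, 1) replicates lane 1 of a uint32x2_t into all four
- * lanes of a uint32x4_t. */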
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vdup_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x2(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x2(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x3(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x3(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x4(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x4(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 19); \
-  __ret; \
-})
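[editor's note] Unlike its siblings, vld4_u64 above has a single definition with no __LITTLE_ENDIAN__ split: each uint64x1_t in the result holds exactly one lane, so there is no lane order to reverse. A hedged usage sketch (compiles only for an ARM target with NEON; sum4 is an illustrative name, while vld4_u64, vadd_u64, and vget_lane_u64 are intrinsics from this same header):

#include <arm_neon.h>

/* De-interleave four 64-bit values from memory into four 1-lane vectors
 * and add them; q.val[k] holds p[k]. */
static uint64_t sum4(const uint64_t *p) {
  uint64x1x4_t q = vld4_u64(p);
  uint64x1_t s = vadd_u64(q.val[0], q.val[1]);
  s = vadd_u64(s, vadd_u64(q.val[2], q.val[3]));
  return vget_lane_u64(s, 0);
}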
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
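[editor's note] The vld4_dup family above loads a single 4-element structure from memory and broadcasts each element across every lane of the corresponding result vector, which is why the big-endian variants only need to reverse the outputs. A scalar model of the semantics, assuming the 8-lane d-register case (the function name is illustrative):

#include <stdint.h>

/* Scalar model of vld4_dup_u8: read one {p[0],p[1],p[2],p[3]} group and
 * replicate element v across all 8 lanes of result vector v. */
static void vld4_dup_u8_model(uint8_t out[4][8], const uint8_t *p) {
  for (int v = 0; v < 4; ++v)
    for (int lane = 0; lane < 8; ++lane)
      out[v][lane] = p[v];  /* every lane of vector v gets p[v] */
}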
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
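[editor's note] The vld4_lane forms above differ from plain vld4 in that they take four existing vectors (__p1) and overwrite only lane __p2 of each with four consecutive elements from memory; because the inputs carry live lane data, the big-endian variants must reverse the source vectors into __rev1 before the builtin call and then reverse the results back. They are macros rather than __ai functions so that the lane index stays a compile-time constant. A scalar model (name illustrative):

#include <stdint.h>

/* Scalar model of vld4_lane_u8: overwrite lane `lane` of each of four
 * existing 8-lane vectors with the next four bytes from memory. */
static void vld4_lane_u8_model(uint8_t vecs[4][8], const uint8_t *p, int lane) {
  for (int v = 0; v < 4; ++v)
    vecs[v][lane] = p[v];  /* only the selected lane changes */
}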
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
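[editor's note] The __ai inline functions above compute the lane-wise maximum of two vectors; the trailing integer passed to the generic __builtin_neon_vmax_v / vmaxq_v builtin encodes the element type (e.g. 48 for uint8x16_t). A hedged usage sketch built only from intrinsics defined in this header (ARM/NEON target required; clamp_floor is an illustrative name):

#include <arm_neon.h>

/* Clamp 16 bytes to a lower bound in one shot:
 * dst[i] = max(src[i], floor_val). */
static void clamp_floor(uint8_t *dst, const uint8_t *src, uint8_t floor_val) {
  uint8x16_t v = vld1q_u8(src);          /* load 16 source bytes */
  uint8x16_t f = vdupq_n_u8(floor_val);  /* broadcast the floor */
  vst1q_u8(dst, vmaxq_u8(v, f));         /* lane-wise maximum, store */
}

The vmin family that follows is the mirror image, lowering to __builtin_neon_vmin_v / vminq_v with the same type codes.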
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vmov_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
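vmov_n_*/vmovq_n_* broadcast one scalar into every lane; since all lanes end up equal, the big-endian reversal above is semantically a no-op, but the generator emits it for uniformity. A usage sketch, assuming a NEON-enabled clang target (e.g. --target=armv7a-linux-gnueabihf -mfpu=neon); the helper name is illustrative:

#include <arm_neon.h>

uint32x4_t all_bits_set(void) {
  return vmovq_n_u32(0xFFFFFFFFu);  /* broadcast the scalar to all 4 lanes */
}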
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#endif
-
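vmovl_* widens each lane to twice its width (zero-extending unsigned types, sign-extending signed ones). The extra __noswap_vmovl_* definitions in the big-endian branch let composite intrinsics pass in vectors that are already in reversed lane order without reversing them again. A usage sketch, assuming a NEON-enabled target:

#include <arm_neon.h>
#include <stdint.h>

/* Widen 8 bytes to 16-bit lanes so later arithmetic cannot overflow. */
uint16x8_t widen_bytes(const uint8_t *p) {
  uint8x8_t b = vld1_u8(p);  /* load 8 bytes */
  return vmovl_u8(b);        /* zero-extend each lane to 16 bits */
}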
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
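vmovn_* is the inverse direction: each lane is narrowed to half its width by truncation, keeping only the low bits. A round-trip sketch, assuming a NEON-enabled target (the saturating vqmovn_* variants are defined elsewhere in this header if truncation is not wanted):

#include <arm_neon.h>

/* Halve each byte with 16-bit intermediate precision, then narrow. */
uint8x8_t halve_bytes(uint8x8_t v) {
  uint16x8_t w = vmovl_u8(v);  /* u8  -> u16 */
  w = vshrq_n_u16(w, 1);       /* >> 1 in each 16-bit lane */
  return vmovn_u16(w);         /* u16 -> u8, truncating */
}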
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
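Unlike the arithmetic vmul_* above, vmul_p8/vmulq_p8 go through __builtin_neon_vmul_v because polynomial multiplication is carry-less: each lane is multiplied as a polynomial over GF(2), with XOR in place of addition. A small sketch, assuming a NEON-enabled target:

#include <arm_neon.h>

/* Carry-less squaring: 0x03 * 0x03 yields 0x05, not 0x09. */
poly8x8_t poly_square(poly8x8_t v) {
  return vmul_p8(v, v);
}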
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
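The vmul_lane_*/vmulq_lane_* forms multiply every lane of the first operand by one selected lane of the second; they are macros rather than functions because __builtin_shufflevector requires the lane index to be a compile-time constant. A usage sketch, assuming a NEON-enabled target:

#include <arm_neon.h>

/* Scale a vector by lane 1 of a coefficient vector. */
int16x4_t scale_by_lane1(int16x4_t v, int16x4_t coeffs) {
  return vmul_lane_s16(v, coeffs, 1);
}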
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * (uint32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * (uint32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * (float32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * (float32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * (int32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * (int32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
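vmull_lane_* composes the widening multiply with a lane splat; note that the big-endian branch calls the __noswap_vmull_* helper so the inputs, already reversed once, are not reversed a second time inside the nested call. A usage sketch, assuming a NEON-enabled target (helper name illustrative):

#include <arm_neon.h>

/* 32x32 -> 64-bit products against one coefficient lane, a common
   fixed-point kernel shape. */
int64x2_t wide_products(int32x2_t a, int32x2_t coeffs) {
  return vmull_lane_s32(a, coeffs, 0);
}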
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
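For purely lanewise operators such as vmvn (bitwise NOT) above and vneg and vorn below, lane order cannot affect the result, so the big-endian reverse/compute/reverse presumably folds away after optimization; the generator still emits it for uniformity. Usage is the same either way:

#include <arm_neon.h>

uint8x8_t invert_bits(uint8x8_t v) {
  return vmvn_u8(v);  /* ~v in every lane */
}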
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
-})
-#else
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
-})
-#else
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
-})
-#else
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
-})
-#else
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
-})
-#else
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
-})
-#else
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
-})
-#else
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
-})
-#else
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
-})
-#else
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
-})
-#else
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
-})
-#else
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
-})
-#else
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
-})
-#else
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
-})
-#else
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
-})
-#else
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
-})
-#endif
-
-#define vst1_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
-})
-#else
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
-})
-#else
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
-})
-#else
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
-})
-#else
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
-})
-#endif
-
-#define vst1_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
-})
-#else
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-})
-#else
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-})
-#else
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-})
-#else
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-})
-#else
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-})
-#else
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-})
-#else
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-})
-#else
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-})
-#else
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-})
-#else
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-})
-#else
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-})
-#else
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-})
-#else
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-})
-#else
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-})
-#else
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-})
-#else
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-})
-#endif
-
-#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-})
-#else
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-})
-#else
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-})
-#else
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-})
-#else
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-})
-#endif
-
-#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-})
-#else
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst2_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst2_s64(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
-
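
The vst2* macros above lower to __builtin_neon_vst2_v / __builtin_neon_vst2q_v, which store two vectors with 2-way interleaving; on big-endian targets each vector's lanes are first reversed with __builtin_shufflevector so the in-memory element order matches the little-endian layout. The single-lane 64-bit variants (vst2_u64, vst2_s64) need no reversal and are defined unconditionally. A minimal usage sketch, illustrative only, assuming a NEON-enabled target and using only intrinsics defined in this header:

#include <arm_neon.h>

/* Interleave two uint16x8_t vectors into dst: a0 b0 a1 b1 ... a7 b7. */
void interleave_u16(uint16_t *dst, uint16x8_t a, uint16x8_t b) {
  uint16x8x2_t pair;
  pair.val[0] = a;      /* lands at even positions dst[0], dst[2], ... */
  pair.val[1] = b;      /* lands at odd positions dst[1], dst[3], ... */
  vst2q_u16(dst, pair); /* writes 16 uint16_t values, interleaved */
}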
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-})
-#else
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-})
-#else
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-})
-#else
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-})
-#else
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-})
-#else
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-})
-#else
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-})
-#else
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-})
-#else
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-})
-#else
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-})
-#else
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-})
-#else
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-})
-#else
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-})
-#else
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-})
-#else
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-})
-#else
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
-})
-#endif
-
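
The vst2_lane_* macros above take a constant lane index (__p2) and store just that lane from each of the two vectors, i.e. exactly two elements, rather than the whole interleaved pair. A sketch of the intended use, illustrative only; the lane index must be a compile-time constant within the vector's lane range:

#include <arm_neon.h>

/* Store lane 2 of each half of the pair: dst[0] = lane 2 of pair.val[0],
 * dst[1] = lane 2 of pair.val[1]. Valid lanes for int16x4_t are 0..3. */
void store_pair_lane2(int16_t *dst, int16x4x2_t pair) {
  vst2_lane_s16(dst, pair, 2);
}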
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst3_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst3_s64(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-})
-#else
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-})
-#else
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-})
-#else
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-})
-#else
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-})
-#else
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-})
-#else
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-})
-#else
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-})
-#else
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-})
-#else
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-})
-#else
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-})
-#else
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-})
-#else
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-})
-#else
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-})
-#else
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-})
-#else
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
-})
-#endif
-
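
The vst3* and vst3_lane_* macros above follow the same pattern with three source vectors and 3-way interleaving, which is the usual way to write packed 3-channel data (e.g. RGB) from planar vectors. A sketch, illustrative only:

#include <arm_neon.h>

/* Write 8 packed RGB pixels (24 bytes) from planar channel vectors;
 * the resulting memory layout is r0 g0 b0 r1 g1 b1 ... r7 g7 b7. */
void store_rgb8(uint8_t *rgb, uint8x8_t r, uint8x8_t g, uint8x8_t b) {
  uint8x8x3_t px = { { r, g, b } };
  vst3_u8(rgb, px);
}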
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst4_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst4_s64(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
-
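
The vst4* macros above extend the pattern to four vectors and 4-way interleaving, e.g. packing planar RGBA channels. As with the narrower variants, the big-endian branch reverses each vector's lanes before the store, and the single-lane 64-bit forms (vst4_u64, vst4_s64) are unconditional. A sketch, illustrative only:

#include <arm_neon.h>

/* Write 16 packed RGBA pixels (64 bytes) from planar channels;
 * the resulting memory layout is r0 g0 b0 a0 r1 g1 b1 a1 ... */
void store_rgba16(uint8_t *rgba, uint8x16_t r, uint8x16_t g,
                  uint8x16_t b, uint8x16_t a) {
  uint8x16x4_t px = { { r, g, b, a } };
  vst4q_u8(rgba, px);
}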
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-})
-#else
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-})
-#else
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-})
-#else
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-})
-#else
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-})
-#else
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-})
-#else
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-})
-#else
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-})
-#else
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-})
-#else
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-})
-#else
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-})
-#else
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-})
-#else
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-})
-#else
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-})
-#else
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-})
-#else
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
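The vtrn_*/vtrnq_* family deleted above transposes pairs of lanes across its two operands. A sketch of the lane arithmetic, assuming an ARM NEON target:

#include <arm_neon.h>

/* vtrn_u8 treats consecutive lane pairs of the two inputs as 2x2
 * blocks and transposes each block:
 *   val[0] = {a0,b0, a2,b2, a4,b4, a6,b6}   (even lanes interleaved)
 *   val[1] = {a1,b1, a3,b3, a5,b5, a7,b7}   (odd lanes interleaved)  */
static uint8x8x2_t demo_vtrn(void) {
  const uint8_t a_bytes[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const uint8_t b_bytes[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  uint8x8x2_t t = vtrn_u8(vld1_u8(a_bytes), vld1_u8(b_bytes));
  /* t.val[0] = {0,10,2,12,4,14,6,16}
   * t.val[1] = {1,11,3,13,5,15,7,17} */
  return t;
}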
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
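The vtst_*/vtstq_* group deleted above is a per-lane bit test: a result lane is all-ones when the AND of the corresponding input lanes is nonzero, and zero otherwise. A minimal sketch, assuming an ARM NEON target:

#include <arm_neon.h>

/* vtst_u8: lane i of the result is 0xFF if (a[i] & b[i]) != 0, else 0.
 * Handy for turning a per-lane bitmask test into a full select mask. */
static uint8x8_t demo_vtst(void) {
  const uint8_t a_bytes[8] = {0x01, 0x02, 0x00, 0xF0, 0x0F, 0x80, 0x01, 0x00};
  const uint8_t b_bytes[8] = {0x01, 0x01, 0xFF, 0x0F, 0x0F, 0x80, 0xFE, 0x00};
  uint8x8_t mask = vtst_u8(vld1_u8(a_bytes), vld1_u8(b_bytes));
  /* mask = {0xFF, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00} */
  return mask;
}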
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
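The vuzp_*/vuzpq_* family deleted above de-interleaves its two operands. A sketch of the resulting lane order, assuming an ARM NEON target:

#include <arm_neon.h>

/* vuzp_u8: viewing the concatenation {a0..a7, b0..b7}, val[0] gathers
 * the even-indexed elements and val[1] the odd-indexed ones:
 *   val[0] = {a0,a2,a4,a6, b0,b2,b4,b6}
 *   val[1] = {a1,a3,a5,a7, b1,b3,b5,b7} */
static uint8x8x2_t demo_vuzp(void) {
  const uint8_t a_bytes[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const uint8_t b_bytes[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  uint8x8x2_t u = vuzp_u8(vld1_u8(a_bytes), vld1_u8(b_bytes));
  /* u.val[0] = {0,2,4,6,10,12,14,16}
   * u.val[1] = {1,3,5,7,11,13,15,17} */
  return u;
}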
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
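The vzip_*/vzipq_* family deleted above interleaves its operands lane by lane (the inverse of vuzp); the remainder of the hunk below is guarded by !defined(__aarch64__), i.e. AArch32-only definitions. A sketch, assuming an ARM NEON target:

#include <arm_neon.h>

/* vzip_u8 interleaves the two inputs:
 *   val[0] = {a0,b0, a1,b1, a2,b2, a3,b3}   (low halves zipped)
 *   val[1] = {a4,b4, a5,b5, a6,b6, a7,b7}   (high halves zipped)
 * Applying vuzp_u8 to the result recovers the original vectors. */
static uint8x8x2_t demo_vzip(void) {
  const uint8_t a_bytes[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const uint8_t b_bytes[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  uint8x8x2_t z = vzip_u8(vld1_u8(a_bytes), vld1_u8(b_bytes));
  /* z.val[0] = {0,10,1,11,2,12,3,13}
   * z.val[1] = {4,14,5,15,6,16,7,17} */
  return z;
}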
-#if !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
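The float16 broadcast macros deleted above (vdup_lane_f16, vdup_n_f16, vmov_n_f16 and their q forms) all build the vector in little-endian lane order and, on big-endian targets, reverse it afterwards. A minimal sketch, assuming a target where __fp16/float16_t is available:

#include <arm_neon.h>

/* vdup_n_f16 broadcasts a scalar to every lane; vdupq_lane_f16
 * broadcasts one lane of an existing 4-lane vector across all 8 lanes
 * of a q-register vector. vmov_n_f16 has the same expansion as
 * vdup_n_f16 in the deleted definitions above. */
static float16x8_t demo_f16_dup(float16_t x) {
  float16x4_t v4 = vdup_n_f16(x);          /* {x, x, x, x}           */
  float16x8_t v8 = vdupq_lane_f16(v4, 1);  /* lane 1 of v4, 8 copies */
  return v8;
}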
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
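
The vqdmulh/vqrdmulh lane variants above are the usual Q15/Q31 fixed-point multipliers. A minimal sketch (illustrative, not from the header; the lane index must be a compile-time constant):

#include <arm_neon.h>

/* Illustrative Q15 scaling: multiply eight Q15 samples by coeffs lane 2.
 * vqdmulh keeps the high half of 2*a*b with saturation; vqrdmulh also
 * rounds before truncating, halving the truncation bias. */
int16x8_t scale_q15(int16x8_t samples, int16x4_t coeffs) {
  return vqrdmulhq_lane_s16(samples, coeffs, 2); /* lane 2, constant */
}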
-
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
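
The vreinterpret_* block above consists purely of bitcasts between equally sized vector types; no lanes are converted or moved, and the calls compile to no instructions. A small sketch (illustrative; vshr_n_u32 is standard NEON, defined elsewhere in this header):

#include <arm_neon.h>

/* Illustrative only: reuse float bits as integers without moving data. */
uint32x2_t float_sign_bits(float32x2_t v) {
  uint32x2_t bits = vreinterpret_u32_f32(v); /* raw IEEE-754 words */
  return vshr_n_u32(bits, 31);               /* 1 per negative lane */
}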
-#if (__ARM_FP & 2)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#endif
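
These two conversions are the storage-only half-precision path guarded by (__ARM_FP & 2). A round-trip sketch (illustrative, not from the header; the round trip is lossy for values outside f16 range or precision):

#include <arm_neon.h>

/* Illustrative only: narrow four f32 lanes to f16 and widen them back. */
float32x4_t through_f16(float32x4_t v) {
  float16x4_t h = vcvt_f16_f32(v); /* f32 -> f16, four lanes */
  return vcvt_f32_f16(h);          /* f16 -> f32, four lanes */
}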
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
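
The vld1 flavors above differ only in what they fill: a contiguous load, a broadcast, or a single-lane insert. A sketch with a hypothetical buffer (illustrative, not from the header):

#include <arm_neon.h>

/* Illustrative only: p is a hypothetical buffer of at least 8 halfs. */
void load_examples(const float16_t *p) {
  float16x8_t a = vld1q_f16(p);            /* lanes 0..7 from p[0..7]     */
  float16x4_t b = vld1_dup_f16(p);         /* p[0] broadcast to all lanes */
  float16x8_t c = vld1q_lane_f16(p, a, 3); /* a with lane 3 <- p[0]       */
  (void)b; (void)c;
}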
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
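
vld2 performs the load and a 2-way deinterleave in one operation, which is the idiomatic way to split interleaved pairs. A sketch (illustrative; the buffer layout is a hypothetical example):

#include <arm_neon.h>

/* Illustrative only: split {re,im,re,im,...} into separate vectors. */
void split_complex(const float16_t *interleaved,
                   float16x8_t *re, float16x8_t *im) {
  float16x8x2_t both = vld2q_f16(interleaved); /* reads 16 halfs */
  *re = both.val[0]; /* elements 0,2,4,... */
  *im = both.val[1]; /* elements 1,3,5,... */
}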
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
-})
-#else
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
-})
-#else
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-})
-#else
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-})
-#else
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-})
-#else
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-})
-#else
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-})
-#else
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-})
-#else
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-})
-#else
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-})
-#else
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8
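-/* ARMv8-only float-to-integer conversions with an explicit rounding mode:
- * vcvta* rounds to nearest with ties away from zero, vcvtm* toward -inf,
- * vcvtn* to nearest with ties to even, vcvtp* toward +inf. Each has a
- * little-endian body that passes vectors through unchanged and a big-endian
- * twin that reverses lanes before and after the builtin call. */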
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_CRYPTO)
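-/* Crypto-extension intrinsics: AES single-round encrypt/decrypt and
- * (inverse) mix-columns steps (vaese/vaesd/vaesmc/vaesimc), followed by the
- * SHA-1 and SHA-256 hash-update and schedule-update primitives (vsha1*,
- * vsha256*). */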
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint32_t vsha1h_u32(uint32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
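-/* Directed-rounding intrinsics: vrnd* round each lane to an integral value.
- * The suffix selects the mode: none = toward zero, a = ties away from zero,
- * i = current FP rounding mode, m = toward -inf, n = ties to even,
- * p = toward +inf, x = current mode while raising inexact. */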
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float32_t vrndns_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrndns_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
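-/* vmaxnm/vminnm implement IEEE 754-2008 maxNum/minNum semantics: when
- * exactly one operand is a quiet NaN, the numeric operand is returned. */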
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
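-/* AArch64-only forms: the same rounding conversions for 64-bit lanes
- * (float64 -> int64/uint64) plus vreinterpret casts between 64-bit vector
- * types. The one-lane ...x1_t variants need no big-endian twin, since a
- * single lane has no order to reverse. */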
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_COMPLEX)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x8_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x8_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x16_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x16_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
-  float16_t __s0_0 = __p0_0; \
-  float16_t __s1_0 = __p1_0; \
-  float16x4_t __s2_0 = __p2_0; \
-  float16_t __ret_0; \
-  __ret_0 = vfmah_lane_f16(__s0_0, -__s1_0, __s2_0, __p3_0); \
-  __ret_0; \
-})
-#else
-#define vfmsh_lane_f16(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
-  float16_t __s0_1 = __p0_1; \
-  float16_t __s1_1 = __p1_1; \
-  float16x4_t __s2_1 = __p2_1; \
-  float16x4_t __rev2_1;  __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 3, 2, 1, 0); \
-  float16_t __ret_1; \
-  __ret_1 = __noswap_vfmah_lane_f16(__s0_1, -__s1_1, __rev2_1, __p3_1); \
-  __ret_1; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
-  float16x8_t __s0_2 = __p0_2; \
-  float16x8_t __s1_2 = __p1_2; \
-  float16x4_t __s2_2 = __p2_2; \
-  float16x8_t __ret_2; \
-  __ret_2 = vfmaq_lane_f16(__s0_2, -__s1_2, __s2_2, __p3_2); \
-  __ret_2; \
-})
-#else
-#define vfmsq_lane_f16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
-  float16x8_t __s0_3 = __p0_3; \
-  float16x8_t __s1_3 = __p1_3; \
-  float16x4_t __s2_3 = __p2_3; \
-  float16x8_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_3;  __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_3;  __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
-  float16x8_t __ret_3; \
-  __ret_3 = __noswap_vfmaq_lane_f16(__rev0_3, -__rev1_3, __rev2_3, __p3_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_3; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
-  float16x4_t __s0_4 = __p0_4; \
-  float16x4_t __s1_4 = __p1_4; \
-  float16x4_t __s2_4 = __p2_4; \
-  float16x4_t __ret_4; \
-  __ret_4 = vfma_lane_f16(__s0_4, -__s1_4, __s2_4, __p3_4); \
-  __ret_4; \
-})
-#else
-#define vfms_lane_f16(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
-  float16x4_t __s0_5 = __p0_5; \
-  float16x4_t __s1_5 = __p1_5; \
-  float16x4_t __s2_5 = __p2_5; \
-  float16x4_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 3, 2, 1, 0); \
-  float16x4_t __rev1_5;  __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
-  float16x4_t __rev2_5;  __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 3, 2, 1, 0); \
-  float16x4_t __ret_5; \
-  __ret_5 = __noswap_vfma_lane_f16(__rev0_5, -__rev1_5, __rev2_5, __p3_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 3, 2, 1, 0); \
-  __ret_5; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
-  float16_t __s0_6 = __p0_6; \
-  float16_t __s1_6 = __p1_6; \
-  float16x8_t __s2_6 = __p2_6; \
-  float16_t __ret_6; \
-  __ret_6 = vfmah_laneq_f16(__s0_6, -__s1_6, __s2_6, __p3_6); \
-  __ret_6; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
-  float16_t __s0_7 = __p0_7; \
-  float16_t __s1_7 = __p1_7; \
-  float16x8_t __s2_7 = __p2_7; \
-  float16x8_t __rev2_7;  __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_7; \
-  __ret_7 = __noswap_vfmah_laneq_f16(__s0_7, -__s1_7, __rev2_7, __p3_7); \
-  __ret_7; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
-  float16x8_t __s0_8 = __p0_8; \
-  float16x8_t __s1_8 = __p1_8; \
-  float16x8_t __s2_8 = __p2_8; \
-  float16x8_t __ret_8; \
-  __ret_8 = vfmaq_laneq_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
-  __ret_8; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
-  float16x8_t __s0_9 = __p0_9; \
-  float16x8_t __s1_9 = __p1_9; \
-  float16x8_t __s2_9 = __p2_9; \
-  float16x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_9;  __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_9;  __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_9; \
-  __ret_9 = __noswap_vfmaq_laneq_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
-  float16x4_t __s0_10 = __p0_10; \
-  float16x4_t __s1_10 = __p1_10; \
-  float16x8_t __s2_10 = __p2_10; \
-  float16x4_t __ret_10; \
-  __ret_10 = vfma_laneq_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
-  __ret_10; \
-})
-#else
-#define vfms_laneq_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
-  float16x4_t __s0_11 = __p0_11; \
-  float16x4_t __s1_11 = __p1_11; \
-  float16x8_t __s2_11 = __p2_11; \
-  float16x4_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
-  float16x4_t __rev1_11;  __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
-  float16x8_t __rev2_11;  __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_11; \
-  __ret_11 = __noswap_vfma_laneq_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai int64_t vceqzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcgezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcgtzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vclezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcltzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
-  poly8x16_t __s0_12 = __p0_12; \
-  poly8x8_t __s2_12 = __p2_12; \
-  poly8x16_t __ret_12; \
-  __ret_12 = vsetq_lane_p8(vget_lane_p8(__s2_12, __p3_12), __s0_12, __p1_12); \
-  __ret_12; \
-})
-#else
-#define vcopyq_lane_p8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
-  poly8x16_t __s0_13 = __p0_13; \
-  poly8x8_t __s2_13 = __p2_13; \
-  poly8x16_t __rev0_13;  __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_13;  __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_13; \
-  __ret_13 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_13; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
-  poly16x8_t __s0_14 = __p0_14; \
-  poly16x4_t __s2_14 = __p2_14; \
-  poly16x8_t __ret_14; \
-  __ret_14 = vsetq_lane_p16(vget_lane_p16(__s2_14, __p3_14), __s0_14, __p1_14); \
-  __ret_14; \
-})
-#else
-#define vcopyq_lane_p16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
-  poly16x8_t __s0_15 = __p0_15; \
-  poly16x4_t __s2_15 = __p2_15; \
-  poly16x8_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_15;  __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 3, 2, 1, 0); \
-  poly16x8_t __ret_15; \
-  __ret_15 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_15, __p3_15), __rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
-  uint8x16_t __s0_16 = __p0_16; \
-  uint8x8_t __s2_16 = __p2_16; \
-  uint8x16_t __ret_16; \
-  __ret_16 = vsetq_lane_u8(vget_lane_u8(__s2_16, __p3_16), __s0_16, __p1_16); \
-  __ret_16; \
-})
-#else
-#define vcopyq_lane_u8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
-  uint8x16_t __s0_17 = __p0_17; \
-  uint8x8_t __s2_17 = __p2_17; \
-  uint8x16_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_17;  __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_17; \
-  __ret_17 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
-  uint32x4_t __s0_18 = __p0_18; \
-  uint32x2_t __s2_18 = __p2_18; \
-  uint32x4_t __ret_18; \
-  __ret_18 = vsetq_lane_u32(vget_lane_u32(__s2_18, __p3_18), __s0_18, __p1_18); \
-  __ret_18; \
-})
-#else
-#define vcopyq_lane_u32(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
-  uint32x4_t __s0_19 = __p0_19; \
-  uint32x2_t __s2_19 = __p2_19; \
-  uint32x4_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 3, 2, 1, 0); \
-  uint32x2_t __rev2_19;  __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 1, 0); \
-  uint32x4_t __ret_19; \
-  __ret_19 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_19, __p3_19), __rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
-  uint64x2_t __s0_20 = __p0_20; \
-  uint64x1_t __s2_20 = __p2_20; \
-  uint64x2_t __ret_20; \
-  __ret_20 = vsetq_lane_u64(vget_lane_u64(__s2_20, __p3_20), __s0_20, __p1_20); \
-  __ret_20; \
-})
-#else
-#define vcopyq_lane_u64(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
-  uint64x2_t __s0_21 = __p0_21; \
-  uint64x1_t __s2_21 = __p2_21; \
-  uint64x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  uint64x2_t __ret_21; \
-  __ret_21 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_21, __p3_21), __rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \
-  __ret_21; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
-  uint16x8_t __s0_22 = __p0_22; \
-  uint16x4_t __s2_22 = __p2_22; \
-  uint16x8_t __ret_22; \
-  __ret_22 = vsetq_lane_u16(vget_lane_u16(__s2_22, __p3_22), __s0_22, __p1_22); \
-  __ret_22; \
-})
-#else
-#define vcopyq_lane_u16(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
-  uint16x8_t __s0_23 = __p0_23; \
-  uint16x4_t __s2_23 = __p2_23; \
-  uint16x8_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_23;  __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 3, 2, 1, 0); \
-  uint16x8_t __ret_23; \
-  __ret_23 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_23, __p3_23), __rev0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_23; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
-  int8x16_t __s0_24 = __p0_24; \
-  int8x8_t __s2_24 = __p2_24; \
-  int8x16_t __ret_24; \
-  __ret_24 = vsetq_lane_s8(vget_lane_s8(__s2_24, __p3_24), __s0_24, __p1_24); \
-  __ret_24; \
-})
-#else
-#define vcopyq_lane_s8(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
-  int8x16_t __s0_25 = __p0_25; \
-  int8x8_t __s2_25 = __p2_25; \
-  int8x16_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_25;  __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_25; \
-  __ret_25 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_25, __p3_25), __rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
-  float32x4_t __s0_26 = __p0_26; \
-  float32x2_t __s2_26 = __p2_26; \
-  float32x4_t __ret_26; \
-  __ret_26 = vsetq_lane_f32(vget_lane_f32(__s2_26, __p3_26), __s0_26, __p1_26); \
-  __ret_26; \
-})
-#else
-#define vcopyq_lane_f32(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
-  float32x4_t __s0_27 = __p0_27; \
-  float32x2_t __s2_27 = __p2_27; \
-  float32x4_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
-  float32x2_t __rev2_27;  __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 1, 0); \
-  float32x4_t __ret_27; \
-  __ret_27 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_27, __p3_27), __rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \
-  __ret_27; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
-  int32x4_t __s0_28 = __p0_28; \
-  int32x2_t __s2_28 = __p2_28; \
-  int32x4_t __ret_28; \
-  __ret_28 = vsetq_lane_s32(vget_lane_s32(__s2_28, __p3_28), __s0_28, __p1_28); \
-  __ret_28; \
-})
-#else
-#define vcopyq_lane_s32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
-  int32x4_t __s0_29 = __p0_29; \
-  int32x2_t __s2_29 = __p2_29; \
-  int32x4_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \
-  int32x2_t __rev2_29;  __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
-  int32x4_t __ret_29; \
-  __ret_29 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 3, 2, 1, 0); \
-  __ret_29; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
-  int64x2_t __s0_30 = __p0_30; \
-  int64x1_t __s2_30 = __p2_30; \
-  int64x2_t __ret_30; \
-  __ret_30 = vsetq_lane_s64(vget_lane_s64(__s2_30, __p3_30), __s0_30, __p1_30); \
-  __ret_30; \
-})
-#else
-#define vcopyq_lane_s64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
-  int64x2_t __s0_31 = __p0_31; \
-  int64x1_t __s2_31 = __p2_31; \
-  int64x2_t __rev0_31;  __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
-  int64x2_t __ret_31; \
-  __ret_31 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_31, __p3_31), __rev0_31, __p1_31); \
-  __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
-  __ret_31; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
-  int16x8_t __s0_32 = __p0_32; \
-  int16x4_t __s2_32 = __p2_32; \
-  int16x8_t __ret_32; \
-  __ret_32 = vsetq_lane_s16(vget_lane_s16(__s2_32, __p3_32), __s0_32, __p1_32); \
-  __ret_32; \
-})
-#else
-#define vcopyq_lane_s16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
-  int16x8_t __s0_33 = __p0_33; \
-  int16x4_t __s2_33 = __p2_33; \
-  int16x8_t __rev0_33;  __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_33;  __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
-  int16x8_t __ret_33; \
-  __ret_33 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
-  __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_33; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
-  poly8x8_t __s0_34 = __p0_34; \
-  poly8x8_t __s2_34 = __p2_34; \
-  poly8x8_t __ret_34; \
-  __ret_34 = vset_lane_p8(vget_lane_p8(__s2_34, __p3_34), __s0_34, __p1_34); \
-  __ret_34; \
-})
-#else
-#define vcopy_lane_p8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
-  poly8x8_t __s0_35 = __p0_35; \
-  poly8x8_t __s2_35 = __p2_35; \
-  poly8x8_t __rev0_35;  __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_35;  __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_35; \
-  __ret_35 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
-  __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_35; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
-  poly16x4_t __s0_36 = __p0_36; \
-  poly16x4_t __s2_36 = __p2_36; \
-  poly16x4_t __ret_36; \
-  __ret_36 = vset_lane_p16(vget_lane_p16(__s2_36, __p3_36), __s0_36, __p1_36); \
-  __ret_36; \
-})
-#else
-#define vcopy_lane_p16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
-  poly16x4_t __s0_37 = __p0_37; \
-  poly16x4_t __s2_37 = __p2_37; \
-  poly16x4_t __rev0_37;  __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 3, 2, 1, 0); \
-  poly16x4_t __rev2_37;  __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
-  poly16x4_t __ret_37; \
-  __ret_37 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
-  __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 3, 2, 1, 0); \
-  __ret_37; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
-  uint8x8_t __s0_38 = __p0_38; \
-  uint8x8_t __s2_38 = __p2_38; \
-  uint8x8_t __ret_38; \
-  __ret_38 = vset_lane_u8(vget_lane_u8(__s2_38, __p3_38), __s0_38, __p1_38); \
-  __ret_38; \
-})
-#else
-#define vcopy_lane_u8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
-  uint8x8_t __s0_39 = __p0_39; \
-  uint8x8_t __s2_39 = __p2_39; \
-  uint8x8_t __rev0_39;  __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_39;  __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_39; \
-  __ret_39 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
-  __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_39; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
-  uint32x2_t __s0_40 = __p0_40; \
-  uint32x2_t __s2_40 = __p2_40; \
-  uint32x2_t __ret_40; \
-  __ret_40 = vset_lane_u32(vget_lane_u32(__s2_40, __p3_40), __s0_40, __p1_40); \
-  __ret_40; \
-})
-#else
-#define vcopy_lane_u32(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
-  uint32x2_t __s0_41 = __p0_41; \
-  uint32x2_t __s2_41 = __p2_41; \
-  uint32x2_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 1, 0); \
-  uint32x2_t __rev2_41;  __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 1, 0); \
-  uint32x2_t __ret_41; \
-  __ret_41 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_41, __p3_41), __rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 1, 0); \
-  __ret_41; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint64x1_t __s0_42 = __p0_42; \
-  uint64x1_t __s2_42 = __p2_42; \
-  uint64x1_t __ret_42; \
-  __ret_42 = vset_lane_u64(vget_lane_u64(__s2_42, __p3_42), __s0_42, __p1_42); \
-  __ret_42; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint16x4_t __s0_43 = __p0_43; \
-  uint16x4_t __s2_43 = __p2_43; \
-  uint16x4_t __ret_43; \
-  __ret_43 = vset_lane_u16(vget_lane_u16(__s2_43, __p3_43), __s0_43, __p1_43); \
-  __ret_43; \
-})
-#else
-#define vcopy_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x4_t __s0_44 = __p0_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x4_t __rev0_44;  __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 3, 2, 1, 0); \
-  uint16x4_t __rev2_44;  __rev2_44 = __builtin_shufflevector(__s2_44, __s2_44, 3, 2, 1, 0); \
-  uint16x4_t __ret_44; \
-  __ret_44 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_44, __p3_44), __rev0_44, __p1_44); \
-  __ret_44 = __builtin_shufflevector(__ret_44, __ret_44, 3, 2, 1, 0); \
-  __ret_44; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  int8x8_t __s0_45 = __p0_45; \
-  int8x8_t __s2_45 = __p2_45; \
-  int8x8_t __ret_45; \
-  __ret_45 = vset_lane_s8(vget_lane_s8(__s2_45, __p3_45), __s0_45, __p1_45); \
-  __ret_45; \
-})
-#else
-#define vcopy_lane_s8(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  int8x8_t __s0_46 = __p0_46; \
-  int8x8_t __s2_46 = __p2_46; \
-  int8x8_t __rev0_46;  __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_46;  __rev2_46 = __builtin_shufflevector(__s2_46, __s2_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_46; \
-  __ret_46 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_46, __p3_46), __rev0_46, __p1_46); \
-  __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_46; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x2_t __s0_47 = __p0_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x2_t __ret_47; \
-  __ret_47 = vset_lane_f32(vget_lane_f32(__s2_47, __p3_47), __s0_47, __p1_47); \
-  __ret_47; \
-})
-#else
-#define vcopy_lane_f32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  float32x2_t __s0_48 = __p0_48; \
-  float32x2_t __s2_48 = __p2_48; \
-  float32x2_t __rev0_48;  __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 1, 0); \
-  float32x2_t __rev2_48;  __rev2_48 = __builtin_shufflevector(__s2_48, __s2_48, 1, 0); \
-  float32x2_t __ret_48; \
-  __ret_48 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_48, __p3_48), __rev0_48, __p1_48); \
-  __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 1, 0); \
-  __ret_48; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x2_t __s0_49 = __p0_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x2_t __ret_49; \
-  __ret_49 = vset_lane_s32(vget_lane_s32(__s2_49, __p3_49), __s0_49, __p1_49); \
-  __ret_49; \
-})
-#else
-#define vcopy_lane_s32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int32x2_t __s0_50 = __p0_50; \
-  int32x2_t __s2_50 = __p2_50; \
-  int32x2_t __rev0_50;  __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \
-  int32x2_t __rev2_50;  __rev2_50 = __builtin_shufflevector(__s2_50, __s2_50, 1, 0); \
-  int32x2_t __ret_50; \
-  __ret_50 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_50, __p3_50), __rev0_50, __p1_50); \
-  __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \
-  __ret_50; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int64x1_t __s0_51 = __p0_51; \
-  int64x1_t __s2_51 = __p2_51; \
-  int64x1_t __ret_51; \
-  __ret_51 = vset_lane_s64(vget_lane_s64(__s2_51, __p3_51), __s0_51, __p1_51); \
-  __ret_51; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  int16x4_t __s0_52 = __p0_52; \
-  int16x4_t __s2_52 = __p2_52; \
-  int16x4_t __ret_52; \
-  __ret_52 = vset_lane_s16(vget_lane_s16(__s2_52, __p3_52), __s0_52, __p1_52); \
-  __ret_52; \
-})
-#else
-#define vcopy_lane_s16(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  int16x4_t __s0_53 = __p0_53; \
-  int16x4_t __s2_53 = __p2_53; \
-  int16x4_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \
-  int16x4_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 3, 2, 1, 0); \
-  int16x4_t __ret_53; \
-  __ret_53 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_53, __p3_53), __rev0_53, __p1_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \
-  __ret_53; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  poly8x16_t __s0_54 = __p0_54; \
-  poly8x16_t __s2_54 = __p2_54; \
-  poly8x16_t __ret_54; \
-  __ret_54 = vsetq_lane_p8(vgetq_lane_p8(__s2_54, __p3_54), __s0_54, __p1_54); \
-  __ret_54; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  poly8x16_t __s0_55 = __p0_55; \
-  poly8x16_t __s2_55 = __p2_55; \
-  poly8x16_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_55; \
-  __ret_55 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_55, __p3_55), __rev0_55, __p1_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_55; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  poly16x8_t __s0_56 = __p0_56; \
-  poly16x8_t __s2_56 = __p2_56; \
-  poly16x8_t __ret_56; \
-  __ret_56 = vsetq_lane_p16(vgetq_lane_p16(__s2_56, __p3_56), __s0_56, __p1_56); \
-  __ret_56; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  poly16x8_t __s0_57 = __p0_57; \
-  poly16x8_t __s2_57 = __p2_57; \
-  poly16x8_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_57; \
-  __ret_57 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_57, __p3_57), __rev0_57, __p1_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_57; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  uint8x16_t __s0_58 = __p0_58; \
-  uint8x16_t __s2_58 = __p2_58; \
-  uint8x16_t __ret_58; \
-  __ret_58 = vsetq_lane_u8(vgetq_lane_u8(__s2_58, __p3_58), __s0_58, __p1_58); \
-  __ret_58; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  uint8x16_t __s0_59 = __p0_59; \
-  uint8x16_t __s2_59 = __p2_59; \
-  uint8x16_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_59; \
-  __ret_59 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_59, __p3_59), __rev0_59, __p1_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_59; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  uint32x4_t __s0_60 = __p0_60; \
-  uint32x4_t __s2_60 = __p2_60; \
-  uint32x4_t __ret_60; \
-  __ret_60 = vsetq_lane_u32(vgetq_lane_u32(__s2_60, __p3_60), __s0_60, __p1_60); \
-  __ret_60; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  uint32x4_t __s0_61 = __p0_61; \
-  uint32x4_t __s2_61 = __p2_61; \
-  uint32x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  uint32x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  uint32x4_t __ret_61; \
-  __ret_61 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_61, __p3_61), __rev0_61, __p1_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint64x2_t __s0_62 = __p0_62; \
-  uint64x2_t __s2_62 = __p2_62; \
-  uint64x2_t __ret_62; \
-  __ret_62 = vsetq_lane_u64(vgetq_lane_u64(__s2_62, __p3_62), __s0_62, __p1_62); \
-  __ret_62; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint64x2_t __s0_63 = __p0_63; \
-  uint64x2_t __s2_63 = __p2_63; \
-  uint64x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
-  uint64x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint64x2_t __ret_63; \
-  __ret_63 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_63, __p3_63), __rev0_63, __p1_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = vsetq_lane_u16(vgetq_lane_u16(__s2_64, __p3_64), __s0_64, __p1_64); \
-  __ret_64; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_65, __p3_65), __rev0_65, __p1_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  int8x16_t __s0_66 = __p0_66; \
-  int8x16_t __s2_66 = __p2_66; \
-  int8x16_t __ret_66; \
-  __ret_66 = vsetq_lane_s8(vgetq_lane_s8(__s2_66, __p3_66), __s0_66, __p1_66); \
-  __ret_66; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  int8x16_t __s0_67 = __p0_67; \
-  int8x16_t __s2_67 = __p2_67; \
-  int8x16_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_67; \
-  __ret_67 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_67, __p3_67), __rev0_67, __p1_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_67; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  float32x4_t __s0_68 = __p0_68; \
-  float32x4_t __s2_68 = __p2_68; \
-  float32x4_t __ret_68; \
-  __ret_68 = vsetq_lane_f32(vgetq_lane_f32(__s2_68, __p3_68), __s0_68, __p1_68); \
-  __ret_68; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  float32x4_t __s0_69 = __p0_69; \
-  float32x4_t __s2_69 = __p2_69; \
-  float32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  float32x4_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \
-  float32x4_t __ret_69; \
-  __ret_69 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_69, __p3_69), __rev0_69, __p1_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int32x4_t __s0_70 = __p0_70; \
-  int32x4_t __s2_70 = __p2_70; \
-  int32x4_t __ret_70; \
-  __ret_70 = vsetq_lane_s32(vgetq_lane_s32(__s2_70, __p3_70), __s0_70, __p1_70); \
-  __ret_70; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int32x4_t __s0_71 = __p0_71; \
-  int32x4_t __s2_71 = __p2_71; \
-  int32x4_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
-  int32x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int32x4_t __ret_71; \
-  __ret_71 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_71, __p3_71), __rev0_71, __p1_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
-  __ret_71; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  int64x2_t __s0_72 = __p0_72; \
-  int64x2_t __s2_72 = __p2_72; \
-  int64x2_t __ret_72; \
-  __ret_72 = vsetq_lane_s64(vgetq_lane_s64(__s2_72, __p3_72), __s0_72, __p1_72); \
-  __ret_72; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  int64x2_t __s0_73 = __p0_73; \
-  int64x2_t __s2_73 = __p2_73; \
-  int64x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  int64x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  int64x2_t __ret_73; \
-  __ret_73 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_73, __p3_73), __rev0_73, __p1_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  int16x8_t __s0_74 = __p0_74; \
-  int16x8_t __s2_74 = __p2_74; \
-  int16x8_t __ret_74; \
-  __ret_74 = vsetq_lane_s16(vgetq_lane_s16(__s2_74, __p3_74), __s0_74, __p1_74); \
-  __ret_74; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  int16x8_t __s0_75 = __p0_75; \
-  int16x8_t __s2_75 = __p2_75; \
-  int16x8_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_75; \
-  __ret_75 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_75, __p3_75), __rev0_75, __p1_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_75; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  poly8x8_t __s0_76 = __p0_76; \
-  poly8x16_t __s2_76 = __p2_76; \
-  poly8x8_t __ret_76; \
-  __ret_76 = vset_lane_p8(vgetq_lane_p8(__s2_76, __p3_76), __s0_76, __p1_76); \
-  __ret_76; \
-})
-#else
-#define vcopy_laneq_p8(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  poly8x8_t __s0_77 = __p0_77; \
-  poly8x16_t __s2_77 = __p2_77; \
-  poly8x8_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_77; \
-  __ret_77 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_77, __p3_77), __rev0_77, __p1_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_77; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  poly16x4_t __s0_78 = __p0_78; \
-  poly16x8_t __s2_78 = __p2_78; \
-  poly16x4_t __ret_78; \
-  __ret_78 = vset_lane_p16(vgetq_lane_p16(__s2_78, __p3_78), __s0_78, __p1_78); \
-  __ret_78; \
-})
-#else
-#define vcopy_laneq_p16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  poly16x4_t __s0_79 = __p0_79; \
-  poly16x8_t __s2_79 = __p2_79; \
-  poly16x4_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \
-  poly16x8_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_79; \
-  __ret_79 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_79, __p3_79), __rev0_79, __p1_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \
-  __ret_79; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  uint8x8_t __s0_80 = __p0_80; \
-  uint8x16_t __s2_80 = __p2_80; \
-  uint8x8_t __ret_80; \
-  __ret_80 = vset_lane_u8(vgetq_lane_u8(__s2_80, __p3_80), __s0_80, __p1_80); \
-  __ret_80; \
-})
-#else
-#define vcopy_laneq_u8(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  uint8x8_t __s0_81 = __p0_81; \
-  uint8x16_t __s2_81 = __p2_81; \
-  uint8x8_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_81; \
-  __ret_81 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_81, __p3_81), __rev0_81, __p1_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_81; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
-  uint32x2_t __s0_82 = __p0_82; \
-  uint32x4_t __s2_82 = __p2_82; \
-  uint32x2_t __ret_82; \
-  __ret_82 = vset_lane_u32(vgetq_lane_u32(__s2_82, __p3_82), __s0_82, __p1_82); \
-  __ret_82; \
-})
-#else
-#define vcopy_laneq_u32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
-  uint32x2_t __s0_83 = __p0_83; \
-  uint32x4_t __s2_83 = __p2_83; \
-  uint32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
-  uint32x4_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \
-  uint32x2_t __ret_83; \
-  __ret_83 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_83, __p3_83), __rev0_83, __p1_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
-  uint64x1_t __s0_84 = __p0_84; \
-  uint64x2_t __s2_84 = __p2_84; \
-  uint64x1_t __ret_84; \
-  __ret_84 = vset_lane_u64(vgetq_lane_u64(__s2_84, __p3_84), __s0_84, __p1_84); \
-  __ret_84; \
-})
-#else
-#define vcopy_laneq_u64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
-  uint64x1_t __s0_85 = __p0_85; \
-  uint64x2_t __s2_85 = __p2_85; \
-  uint64x2_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \
-  uint64x1_t __ret_85; \
-  __ret_85 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_85, __p3_85), __s0_85, __p1_85); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
-  uint16x4_t __s0_86 = __p0_86; \
-  uint16x8_t __s2_86 = __p2_86; \
-  uint16x4_t __ret_86; \
-  __ret_86 = vset_lane_u16(vgetq_lane_u16(__s2_86, __p3_86), __s0_86, __p1_86); \
-  __ret_86; \
-})
-#else
-#define vcopy_laneq_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
-  uint16x4_t __s0_87 = __p0_87; \
-  uint16x8_t __s2_87 = __p2_87; \
-  uint16x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  uint16x8_t __rev2_87;  __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_87; \
-  __ret_87 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_87, __p3_87), __rev0_87, __p1_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
-  int8x8_t __s0_88 = __p0_88; \
-  int8x16_t __s2_88 = __p2_88; \
-  int8x8_t __ret_88; \
-  __ret_88 = vset_lane_s8(vgetq_lane_s8(__s2_88, __p3_88), __s0_88, __p1_88); \
-  __ret_88; \
-})
-#else
-#define vcopy_laneq_s8(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
-  int8x8_t __s0_89 = __p0_89; \
-  int8x16_t __s2_89 = __p2_89; \
-  int8x8_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_89;  __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_89; \
-  __ret_89 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_89, __p3_89), __rev0_89, __p1_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_89; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
-  float32x2_t __s0_90 = __p0_90; \
-  float32x4_t __s2_90 = __p2_90; \
-  float32x2_t __ret_90; \
-  __ret_90 = vset_lane_f32(vgetq_lane_f32(__s2_90, __p3_90), __s0_90, __p1_90); \
-  __ret_90; \
-})
-#else
-#define vcopy_laneq_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
-  float32x2_t __s0_91 = __p0_91; \
-  float32x4_t __s2_91 = __p2_91; \
-  float32x2_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \
-  float32x4_t __rev2_91;  __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 3, 2, 1, 0); \
-  float32x2_t __ret_91; \
-  __ret_91 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_91, __p3_91), __rev0_91, __p1_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \
-  __ret_91; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
-  int32x2_t __s0_92 = __p0_92; \
-  int32x4_t __s2_92 = __p2_92; \
-  int32x2_t __ret_92; \
-  __ret_92 = vset_lane_s32(vgetq_lane_s32(__s2_92, __p3_92), __s0_92, __p1_92); \
-  __ret_92; \
-})
-#else
-#define vcopy_laneq_s32(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
-  int32x2_t __s0_93 = __p0_93; \
-  int32x4_t __s2_93 = __p2_93; \
-  int32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  int32x4_t __rev2_93;  __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \
-  int32x2_t __ret_93; \
-  __ret_93 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_93, __p3_93), __rev0_93, __p1_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
-  int64x1_t __s0_94 = __p0_94; \
-  int64x2_t __s2_94 = __p2_94; \
-  int64x1_t __ret_94; \
-  __ret_94 = vset_lane_s64(vgetq_lane_s64(__s2_94, __p3_94), __s0_94, __p1_94); \
-  __ret_94; \
-})
-#else
-#define vcopy_laneq_s64(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
-  int64x1_t __s0_95 = __p0_95; \
-  int64x2_t __s2_95 = __p2_95; \
-  int64x2_t __rev2_95;  __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \
-  int64x1_t __ret_95; \
-  __ret_95 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_95, __p3_95), __s0_95, __p1_95); \
-  __ret_95; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
-  int16x4_t __s0_96 = __p0_96; \
-  int16x8_t __s2_96 = __p2_96; \
-  int16x4_t __ret_96; \
-  __ret_96 = vset_lane_s16(vgetq_lane_s16(__s2_96, __p3_96), __s0_96, __p1_96); \
-  __ret_96; \
-})
-#else
-#define vcopy_laneq_s16(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
-  int16x4_t __s0_97 = __p0_97; \
-  int16x8_t __s2_97 = __p2_97; \
-  int16x4_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 3, 2, 1, 0); \
-  int16x8_t __rev2_97;  __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_97; \
-  __ret_97 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_97, __p3_97), __rev0_97, __p1_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 3, 2, 1, 0); \
-  __ret_97; \
-})
-#endif
-
-#define vcreate_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float64x1_t)(__promote); \
-  __ret; \
-})
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#define vfmsd_lane_f64(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
-  float64_t __s0_98 = __p0_98; \
-  float64_t __s1_98 = __p1_98; \
-  float64x1_t __s2_98 = __p2_98; \
-  float64_t __ret_98; \
-  __ret_98 = vfmad_lane_f64(__s0_98, -__s1_98, __s2_98, __p3_98); \
-  __ret_98; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
-  float32_t __s0_99 = __p0_99; \
-  float32_t __s1_99 = __p1_99; \
-  float32x2_t __s2_99 = __p2_99; \
-  float32_t __ret_99; \
-  __ret_99 = vfmas_lane_f32(__s0_99, -__s1_99, __s2_99, __p3_99); \
-  __ret_99; \
-})
-#else
-#define vfmss_lane_f32(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
-  float32_t __s0_100 = __p0_100; \
-  float32_t __s1_100 = __p1_100; \
-  float32x2_t __s2_100 = __p2_100; \
-  float32x2_t __rev2_100;  __rev2_100 = __builtin_shufflevector(__s2_100, __s2_100, 1, 0); \
-  float32_t __ret_100; \
-  __ret_100 = __noswap_vfmas_lane_f32(__s0_100, -__s1_100, __rev2_100, __p3_100); \
-  __ret_100; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
-  float64x2_t __s0_101 = __p0_101; \
-  float64x2_t __s1_101 = __p1_101; \
-  float64x1_t __s2_101 = __p2_101; \
-  float64x2_t __ret_101; \
-  __ret_101 = vfmaq_lane_f64(__s0_101, -__s1_101, __s2_101, __p3_101); \
-  __ret_101; \
-})
-#else
-#define vfmsq_lane_f64(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
-  float64x2_t __s0_102 = __p0_102; \
-  float64x2_t __s1_102 = __p1_102; \
-  float64x1_t __s2_102 = __p2_102; \
-  float64x2_t __rev0_102;  __rev0_102 = __builtin_shufflevector(__s0_102, __s0_102, 1, 0); \
-  float64x2_t __rev1_102;  __rev1_102 = __builtin_shufflevector(__s1_102, __s1_102, 1, 0); \
-  float64x2_t __ret_102; \
-  __ret_102 = __noswap_vfmaq_lane_f64(__rev0_102, -__rev1_102, __s2_102, __p3_102); \
-  __ret_102 = __builtin_shufflevector(__ret_102, __ret_102, 1, 0); \
-  __ret_102; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
-  float32x4_t __s0_103 = __p0_103; \
-  float32x4_t __s1_103 = __p1_103; \
-  float32x2_t __s2_103 = __p2_103; \
-  float32x4_t __ret_103; \
-  __ret_103 = vfmaq_lane_f32(__s0_103, -__s1_103, __s2_103, __p3_103); \
-  __ret_103; \
-})
-#else
-#define vfmsq_lane_f32(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
-  float32x4_t __s0_104 = __p0_104; \
-  float32x4_t __s1_104 = __p1_104; \
-  float32x2_t __s2_104 = __p2_104; \
-  float32x4_t __rev0_104;  __rev0_104 = __builtin_shufflevector(__s0_104, __s0_104, 3, 2, 1, 0); \
-  float32x4_t __rev1_104;  __rev1_104 = __builtin_shufflevector(__s1_104, __s1_104, 3, 2, 1, 0); \
-  float32x2_t __rev2_104;  __rev2_104 = __builtin_shufflevector(__s2_104, __s2_104, 1, 0); \
-  float32x4_t __ret_104; \
-  __ret_104 = __noswap_vfmaq_lane_f32(__rev0_104, -__rev1_104, __rev2_104, __p3_104); \
-  __ret_104 = __builtin_shufflevector(__ret_104, __ret_104, 3, 2, 1, 0); \
-  __ret_104; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
-  float64x1_t __s0_105 = __p0_105; \
-  float64x1_t __s1_105 = __p1_105; \
-  float64x1_t __s2_105 = __p2_105; \
-  float64x1_t __ret_105; \
-  __ret_105 = vfma_lane_f64(__s0_105, -__s1_105, __s2_105, __p3_105); \
-  __ret_105; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
-  float32x2_t __s0_106 = __p0_106; \
-  float32x2_t __s1_106 = __p1_106; \
-  float32x2_t __s2_106 = __p2_106; \
-  float32x2_t __ret_106; \
-  __ret_106 = vfma_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
-  __ret_106; \
-})
-#else
-#define vfms_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
-  float32x2_t __s0_107 = __p0_107; \
-  float32x2_t __s1_107 = __p1_107; \
-  float32x2_t __s2_107 = __p2_107; \
-  float32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  float32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  float32x2_t __rev2_107;  __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
-  float32x2_t __ret_107; \
-  __ret_107 = __noswap_vfma_lane_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
-  float64_t __s0_108 = __p0_108; \
-  float64_t __s1_108 = __p1_108; \
-  float64x2_t __s2_108 = __p2_108; \
-  float64_t __ret_108; \
-  __ret_108 = vfmad_laneq_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
-  __ret_108; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
-  float64_t __s0_109 = __p0_109; \
-  float64_t __s1_109 = __p1_109; \
-  float64x2_t __s2_109 = __p2_109; \
-  float64x2_t __rev2_109;  __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \
-  float64_t __ret_109; \
-  __ret_109 = __noswap_vfmad_laneq_f64(__s0_109, -__s1_109, __rev2_109, __p3_109); \
-  __ret_109; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  float32_t __s0_110 = __p0_110; \
-  float32_t __s1_110 = __p1_110; \
-  float32x4_t __s2_110 = __p2_110; \
-  float32_t __ret_110; \
-  __ret_110 = vfmas_laneq_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
-  __ret_110; \
-})
-#else
-#define vfmss_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  float32_t __s0_111 = __p0_111; \
-  float32_t __s1_111 = __p1_111; \
-  float32x4_t __s2_111 = __p2_111; \
-  float32x4_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \
-  float32_t __ret_111; \
-  __ret_111 = __noswap_vfmas_laneq_f32(__s0_111, -__s1_111, __rev2_111, __p3_111); \
-  __ret_111; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  float64x2_t __s0_112 = __p0_112; \
-  float64x2_t __s1_112 = __p1_112; \
-  float64x2_t __s2_112 = __p2_112; \
-  float64x2_t __ret_112; \
-  __ret_112 = vfmaq_laneq_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
-  __ret_112; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  float64x2_t __s0_113 = __p0_113; \
-  float64x2_t __s1_113 = __p1_113; \
-  float64x2_t __s2_113 = __p2_113; \
-  float64x2_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 1, 0); \
-  float64x2_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 1, 0); \
-  float64x2_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 1, 0); \
-  float64x2_t __ret_113; \
-  __ret_113 = __noswap_vfmaq_laneq_f64(__rev0_113, -__rev1_113, __rev2_113, __p3_113); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 1, 0); \
-  __ret_113; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  float32x4_t __s0_114 = __p0_114; \
-  float32x4_t __s1_114 = __p1_114; \
-  float32x4_t __s2_114 = __p2_114; \
-  float32x4_t __ret_114; \
-  __ret_114 = vfmaq_laneq_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
-  __ret_114; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  float32x4_t __s0_115 = __p0_115; \
-  float32x4_t __s1_115 = __p1_115; \
-  float32x4_t __s2_115 = __p2_115; \
-  float32x4_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 3, 2, 1, 0); \
-  float32x4_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 3, 2, 1, 0); \
-  float32x4_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 3, 2, 1, 0); \
-  float32x4_t __ret_115; \
-  __ret_115 = __noswap_vfmaq_laneq_f32(__rev0_115, -__rev1_115, __rev2_115, __p3_115); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 3, 2, 1, 0); \
-  __ret_115; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  float64x1_t __s0_116 = __p0_116; \
-  float64x1_t __s1_116 = __p1_116; \
-  float64x2_t __s2_116 = __p2_116; \
-  float64x1_t __ret_116; \
-  __ret_116 = vfma_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
-  __ret_116; \
-})
-#else
-#define vfms_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  float64x1_t __s0_117 = __p0_117; \
-  float64x1_t __s1_117 = __p1_117; \
-  float64x2_t __s2_117 = __p2_117; \
-  float64x2_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
-  float64x1_t __ret_117; \
-  __ret_117 = __noswap_vfma_laneq_f64(__s0_117, -__s1_117, __rev2_117, __p3_117); \
-  __ret_117; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
-  float32x2_t __s0_118 = __p0_118; \
-  float32x2_t __s1_118 = __p1_118; \
-  float32x4_t __s2_118 = __p2_118; \
-  float32x2_t __ret_118; \
-  __ret_118 = vfma_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
-  __ret_118; \
-})
-#else
-#define vfms_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
-  float32x2_t __s0_119 = __p0_119; \
-  float32x2_t __s1_119 = __p1_119; \
-  float32x4_t __s2_119 = __p2_119; \
-  float32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  float32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  float32x4_t __rev2_119;  __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
-  float32x2_t __ret_119; \
-  __ret_119 = __noswap_vfma_laneq_f32(__rev0_119, -__rev1_119, __rev2_119, __p3_119); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-  __ret; \
-})
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_120) {
-  uint16x8_t __ret_120;
-  uint8x8_t __a1_120 = vget_high_u8(__p0_120);
-  __ret_120 = (uint16x8_t)(vshll_n_u8(__a1_120, 0));
-  return __ret_120;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_121) {
-  uint8x16_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__p0_121, __p0_121, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_121;
-  uint8x8_t __a1_121 = __noswap_vget_high_u8(__rev0_121);
-  __ret_121 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_121, 0));
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_121;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_122) {
-  uint16x8_t __ret_122;
-  uint8x8_t __a1_122 = __noswap_vget_high_u8(__p0_122);
-  __ret_122 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_122, 0));
-  return __ret_122;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_123) {
-  uint64x2_t __ret_123;
-  uint32x2_t __a1_123 = vget_high_u32(__p0_123);
-  __ret_123 = (uint64x2_t)(vshll_n_u32(__a1_123, 0));
-  return __ret_123;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_124) {
-  uint32x4_t __rev0_124;  __rev0_124 = __builtin_shufflevector(__p0_124, __p0_124, 3, 2, 1, 0);
-  uint64x2_t __ret_124;
-  uint32x2_t __a1_124 = __noswap_vget_high_u32(__rev0_124);
-  __ret_124 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_124, 0));
-  __ret_124 = __builtin_shufflevector(__ret_124, __ret_124, 1, 0);
-  return __ret_124;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_125) {
-  uint64x2_t __ret_125;
-  uint32x2_t __a1_125 = __noswap_vget_high_u32(__p0_125);
-  __ret_125 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_125, 0));
-  return __ret_125;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_126) {
-  uint32x4_t __ret_126;
-  uint16x4_t __a1_126 = vget_high_u16(__p0_126);
-  __ret_126 = (uint32x4_t)(vshll_n_u16(__a1_126, 0));
-  return __ret_126;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_127) {
-  uint16x8_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__p0_127, __p0_127, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_127;
-  uint16x4_t __a1_127 = __noswap_vget_high_u16(__rev0_127);
-  __ret_127 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_127, 0));
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0);
-  return __ret_127;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_128) {
-  uint32x4_t __ret_128;
-  uint16x4_t __a1_128 = __noswap_vget_high_u16(__p0_128);
-  __ret_128 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_128, 0));
-  return __ret_128;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_129) {
-  int16x8_t __ret_129;
-  int8x8_t __a1_129 = vget_high_s8(__p0_129);
-  __ret_129 = (int16x8_t)(vshll_n_s8(__a1_129, 0));
-  return __ret_129;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_130) {
-  int8x16_t __rev0_130;  __rev0_130 = __builtin_shufflevector(__p0_130, __p0_130, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_130;
-  int8x8_t __a1_130 = __noswap_vget_high_s8(__rev0_130);
-  __ret_130 = (int16x8_t)(__noswap_vshll_n_s8(__a1_130, 0));
-  __ret_130 = __builtin_shufflevector(__ret_130, __ret_130, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_130;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_131) {
-  int16x8_t __ret_131;
-  int8x8_t __a1_131 = __noswap_vget_high_s8(__p0_131);
-  __ret_131 = (int16x8_t)(__noswap_vshll_n_s8(__a1_131, 0));
-  return __ret_131;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_132) {
-  int64x2_t __ret_132;
-  int32x2_t __a1_132 = vget_high_s32(__p0_132);
-  __ret_132 = (int64x2_t)(vshll_n_s32(__a1_132, 0));
-  return __ret_132;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_133) {
-  int32x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__p0_133, __p0_133, 3, 2, 1, 0);
-  int64x2_t __ret_133;
-  int32x2_t __a1_133 = __noswap_vget_high_s32(__rev0_133);
-  __ret_133 = (int64x2_t)(__noswap_vshll_n_s32(__a1_133, 0));
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0);
-  return __ret_133;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_134) {
-  int64x2_t __ret_134;
-  int32x2_t __a1_134 = __noswap_vget_high_s32(__p0_134);
-  __ret_134 = (int64x2_t)(__noswap_vshll_n_s32(__a1_134, 0));
-  return __ret_134;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_135) {
-  int32x4_t __ret_135;
-  int16x4_t __a1_135 = vget_high_s16(__p0_135);
-  __ret_135 = (int32x4_t)(vshll_n_s16(__a1_135, 0));
-  return __ret_135;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_136) {
-  int16x8_t __rev0_136;  __rev0_136 = __builtin_shufflevector(__p0_136, __p0_136, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_136;
-  int16x4_t __a1_136 = __noswap_vget_high_s16(__rev0_136);
-  __ret_136 = (int32x4_t)(__noswap_vshll_n_s16(__a1_136, 0));
-  __ret_136 = __builtin_shufflevector(__ret_136, __ret_136, 3, 2, 1, 0);
-  return __ret_136;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_137) {
-  int32x4_t __ret_137;
-  int16x4_t __a1_137 = __noswap_vget_high_s16(__p0_137);
-  __ret_137 = (int32x4_t)(__noswap_vshll_n_s16(__a1_137, 0));
-  return __ret_137;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  float64_t __s0_138 = __p0_138; \
-  float64x1_t __s1_138 = __p1_138; \
-  float64_t __ret_138; \
-  __ret_138 = __s0_138 * vget_lane_f64(__s1_138, __p2_138); \
-  __ret_138; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  float32_t __s0_139 = __p0_139; \
-  float32x2_t __s1_139 = __p1_139; \
-  float32_t __ret_139; \
-  __ret_139 = __s0_139 * vget_lane_f32(__s1_139, __p2_139); \
-  __ret_139; \
-})
-#else
-#define vmuls_lane_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  float32_t __s0_140 = __p0_140; \
-  float32x2_t __s1_140 = __p1_140; \
-  float32x2_t __rev1_140;  __rev1_140 = __builtin_shufflevector(__s1_140, __s1_140, 1, 0); \
-  float32_t __ret_140; \
-  __ret_140 = __s0_140 * __noswap_vget_lane_f32(__rev1_140, __p2_140); \
-  __ret_140; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  float64_t __s0_141 = __p0_141; \
-  float64x2_t __s1_141 = __p1_141; \
-  float64_t __ret_141; \
-  __ret_141 = __s0_141 * vgetq_lane_f64(__s1_141, __p2_141); \
-  __ret_141; \
-})
-#else
-#define vmuld_laneq_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
-  float64_t __s0_142 = __p0_142; \
-  float64x2_t __s1_142 = __p1_142; \
-  float64x2_t __rev1_142;  __rev1_142 = __builtin_shufflevector(__s1_142, __s1_142, 1, 0); \
-  float64_t __ret_142; \
-  __ret_142 = __s0_142 * __noswap_vgetq_lane_f64(__rev1_142, __p2_142); \
-  __ret_142; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_143, __p1_143, __p2_143) __extension__ ({ \
-  float32_t __s0_143 = __p0_143; \
-  float32x4_t __s1_143 = __p1_143; \
-  float32_t __ret_143; \
-  __ret_143 = __s0_143 * vgetq_lane_f32(__s1_143, __p2_143); \
-  __ret_143; \
-})
-#else
-#define vmuls_laneq_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
-  float32_t __s0_144 = __p0_144; \
-  float32x4_t __s1_144 = __p1_144; \
-  float32x4_t __rev1_144;  __rev1_144 = __builtin_shufflevector(__s1_144, __s1_144, 3, 2, 1, 0); \
-  float32_t __ret_144; \
-  __ret_144 = __s0_144 * __noswap_vgetq_lane_f32(__rev1_144, __p2_144); \
-  __ret_144; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
-#define vmulxd_lane_f64(__p0_145, __p1_145, __p2_145) __extension__ ({ \
-  float64_t __s0_145 = __p0_145; \
-  float64x1_t __s1_145 = __p1_145; \
-  float64_t __ret_145; \
-  __ret_145 = vmulxd_f64(__s0_145, vget_lane_f64(__s1_145, __p2_145)); \
-  __ret_145; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_146, __p1_146, __p2_146) __extension__ ({ \
-  float32_t __s0_146 = __p0_146; \
-  float32x2_t __s1_146 = __p1_146; \
-  float32_t __ret_146; \
-  __ret_146 = vmulxs_f32(__s0_146, vget_lane_f32(__s1_146, __p2_146)); \
-  __ret_146; \
-})
-#else
-#define vmulxs_lane_f32(__p0_147, __p1_147, __p2_147) __extension__ ({ \
-  float32_t __s0_147 = __p0_147; \
-  float32x2_t __s1_147 = __p1_147; \
-  float32x2_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
-  float32_t __ret_147; \
-  __ret_147 = vmulxs_f32(__s0_147, __noswap_vget_lane_f32(__rev1_147, __p2_147)); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_148, __p1_148, __p2_148) __extension__ ({ \
-  float64_t __s0_148 = __p0_148; \
-  float64x2_t __s1_148 = __p1_148; \
-  float64_t __ret_148; \
-  __ret_148 = vmulxd_f64(__s0_148, vgetq_lane_f64(__s1_148, __p2_148)); \
-  __ret_148; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_149, __p1_149, __p2_149) __extension__ ({ \
-  float64_t __s0_149 = __p0_149; \
-  float64x2_t __s1_149 = __p1_149; \
-  float64x2_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 1, 0); \
-  float64_t __ret_149; \
-  __ret_149 = vmulxd_f64(__s0_149, __noswap_vgetq_lane_f64(__rev1_149, __p2_149)); \
-  __ret_149; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_150, __p1_150, __p2_150) __extension__ ({ \
-  float32_t __s0_150 = __p0_150; \
-  float32x4_t __s1_150 = __p1_150; \
-  float32_t __ret_150; \
-  __ret_150 = vmulxs_f32(__s0_150, vgetq_lane_f32(__s1_150, __p2_150)); \
-  __ret_150; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_151, __p1_151, __p2_151) __extension__ ({ \
-  float32_t __s0_151 = __p0_151; \
-  float32x4_t __s1_151 = __p1_151; \
-  float32x4_t __rev1_151;  __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \
-  float32_t __ret_151; \
-  __ret_151 = vmulxs_f32(__s0_151, __noswap_vgetq_lane_f32(__rev1_151, __p2_151)); \
-  __ret_151; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
-  int32_t __s0_152 = __p0_152; \
-  int32x2_t __s1_152 = __p1_152; \
-  int32_t __ret_152; \
-  __ret_152 = vqdmulhs_s32(__s0_152, vget_lane_s32(__s1_152, __p2_152)); \
-  __ret_152; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
-  int32_t __s0_153 = __p0_153; \
-  int32x2_t __s1_153 = __p1_153; \
-  int32x2_t __rev1_153;  __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 1, 0); \
-  int32_t __ret_153; \
-  __ret_153 = vqdmulhs_s32(__s0_153, __noswap_vget_lane_s32(__rev1_153, __p2_153)); \
-  __ret_153; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_154, __p1_154, __p2_154) __extension__ ({ \
-  int16_t __s0_154 = __p0_154; \
-  int16x4_t __s1_154 = __p1_154; \
-  int16_t __ret_154; \
-  __ret_154 = vqdmulhh_s16(__s0_154, vget_lane_s16(__s1_154, __p2_154)); \
-  __ret_154; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_155, __p1_155, __p2_155) __extension__ ({ \
-  int16_t __s0_155 = __p0_155; \
-  int16x4_t __s1_155 = __p1_155; \
-  int16x4_t __rev1_155;  __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \
-  int16_t __ret_155; \
-  __ret_155 = vqdmulhh_s16(__s0_155, __noswap_vget_lane_s16(__rev1_155, __p2_155)); \
-  __ret_155; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
-  int32_t __s0_156 = __p0_156; \
-  int32x4_t __s1_156 = __p1_156; \
-  int32_t __ret_156; \
-  __ret_156 = vqdmulhs_s32(__s0_156, vgetq_lane_s32(__s1_156, __p2_156)); \
-  __ret_156; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
-  int32_t __s0_157 = __p0_157; \
-  int32x4_t __s1_157 = __p1_157; \
-  int32x4_t __rev1_157;  __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
-  int32_t __ret_157; \
-  __ret_157 = vqdmulhs_s32(__s0_157, __noswap_vgetq_lane_s32(__rev1_157, __p2_157)); \
-  __ret_157; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_158, __p1_158, __p2_158) __extension__ ({ \
-  int16_t __s0_158 = __p0_158; \
-  int16x8_t __s1_158 = __p1_158; \
-  int16_t __ret_158; \
-  __ret_158 = vqdmulhh_s16(__s0_158, vgetq_lane_s16(__s1_158, __p2_158)); \
-  __ret_158; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_159, __p1_159, __p2_159) __extension__ ({ \
-  int16_t __s0_159 = __p0_159; \
-  int16x8_t __s1_159 = __p1_159; \
-  int16x8_t __rev1_159;  __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_159; \
-  __ret_159 = vqdmulhh_s16(__s0_159, __noswap_vgetq_lane_s16(__rev1_159, __p2_159)); \
-  __ret_159; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_160, __p1_160, __p2_160) __extension__ ({ \
-  int32_t __s0_160 = __p0_160; \
-  int32x2_t __s1_160 = __p1_160; \
-  int64_t __ret_160; \
-  __ret_160 = vqdmulls_s32(__s0_160, vget_lane_s32(__s1_160, __p2_160)); \
-  __ret_160; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_161, __p1_161, __p2_161) __extension__ ({ \
-  int32_t __s0_161 = __p0_161; \
-  int32x2_t __s1_161 = __p1_161; \
-  int32x2_t __rev1_161;  __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 1, 0); \
-  int64_t __ret_161; \
-  __ret_161 = vqdmulls_s32(__s0_161, __noswap_vget_lane_s32(__rev1_161, __p2_161)); \
-  __ret_161; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_162, __p1_162, __p2_162) __extension__ ({ \
-  int16_t __s0_162 = __p0_162; \
-  int16x4_t __s1_162 = __p1_162; \
-  int32_t __ret_162; \
-  __ret_162 = vqdmullh_s16(__s0_162, vget_lane_s16(__s1_162, __p2_162)); \
-  __ret_162; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_163, __p1_163, __p2_163) __extension__ ({ \
-  int16_t __s0_163 = __p0_163; \
-  int16x4_t __s1_163 = __p1_163; \
-  int16x4_t __rev1_163;  __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
-  int32_t __ret_163; \
-  __ret_163 = vqdmullh_s16(__s0_163, __noswap_vget_lane_s16(__rev1_163, __p2_163)); \
-  __ret_163; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_164, __p1_164, __p2_164) __extension__ ({ \
-  int32_t __s0_164 = __p0_164; \
-  int32x4_t __s1_164 = __p1_164; \
-  int64_t __ret_164; \
-  __ret_164 = vqdmulls_s32(__s0_164, vgetq_lane_s32(__s1_164, __p2_164)); \
-  __ret_164; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_165, __p1_165, __p2_165) __extension__ ({ \
-  int32_t __s0_165 = __p0_165; \
-  int32x4_t __s1_165 = __p1_165; \
-  int32x4_t __rev1_165;  __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \
-  int64_t __ret_165; \
-  __ret_165 = vqdmulls_s32(__s0_165, __noswap_vgetq_lane_s32(__rev1_165, __p2_165)); \
-  __ret_165; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_166, __p1_166, __p2_166) __extension__ ({ \
-  int16_t __s0_166 = __p0_166; \
-  int16x8_t __s1_166 = __p1_166; \
-  int32_t __ret_166; \
-  __ret_166 = vqdmullh_s16(__s0_166, vgetq_lane_s16(__s1_166, __p2_166)); \
-  __ret_166; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_167, __p1_167, __p2_167) __extension__ ({ \
-  int16_t __s0_167 = __p0_167; \
-  int16x8_t __s1_167 = __p1_167; \
-  int16x8_t __rev1_167;  __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_167; \
-  __ret_167 = vqdmullh_s16(__s0_167, __noswap_vgetq_lane_s16(__rev1_167, __p2_167)); \
-  __ret_167; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int16_t vqmovuns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovund_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovunh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_168, __p1_168, __p2_168) __extension__ ({ \
-  int32_t __s0_168 = __p0_168; \
-  int32x2_t __s1_168 = __p1_168; \
-  int32_t __ret_168; \
-  __ret_168 = vqrdmulhs_s32(__s0_168, vget_lane_s32(__s1_168, __p2_168)); \
-  __ret_168; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_169, __p1_169, __p2_169) __extension__ ({ \
-  int32_t __s0_169 = __p0_169; \
-  int32x2_t __s1_169 = __p1_169; \
-  int32x2_t __rev1_169;  __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 1, 0); \
-  int32_t __ret_169; \
-  __ret_169 = vqrdmulhs_s32(__s0_169, __noswap_vget_lane_s32(__rev1_169, __p2_169)); \
-  __ret_169; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_170, __p1_170, __p2_170) __extension__ ({ \
-  int16_t __s0_170 = __p0_170; \
-  int16x4_t __s1_170 = __p1_170; \
-  int16_t __ret_170; \
-  __ret_170 = vqrdmulhh_s16(__s0_170, vget_lane_s16(__s1_170, __p2_170)); \
-  __ret_170; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_171, __p1_171, __p2_171) __extension__ ({ \
-  int16_t __s0_171 = __p0_171; \
-  int16x4_t __s1_171 = __p1_171; \
-  int16x4_t __rev1_171;  __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
-  int16_t __ret_171; \
-  __ret_171 = vqrdmulhh_s16(__s0_171, __noswap_vget_lane_s16(__rev1_171, __p2_171)); \
-  __ret_171; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_172, __p1_172, __p2_172) __extension__ ({ \
-  int32_t __s0_172 = __p0_172; \
-  int32x4_t __s1_172 = __p1_172; \
-  int32_t __ret_172; \
-  __ret_172 = vqrdmulhs_s32(__s0_172, vgetq_lane_s32(__s1_172, __p2_172)); \
-  __ret_172; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_173, __p1_173, __p2_173) __extension__ ({ \
-  int32_t __s0_173 = __p0_173; \
-  int32x4_t __s1_173 = __p1_173; \
-  int32x4_t __rev1_173;  __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 3, 2, 1, 0); \
-  int32_t __ret_173; \
-  __ret_173 = vqrdmulhs_s32(__s0_173, __noswap_vgetq_lane_s32(__rev1_173, __p2_173)); \
-  __ret_173; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_174, __p1_174, __p2_174) __extension__ ({ \
-  int16_t __s0_174 = __p0_174; \
-  int16x8_t __s1_174 = __p1_174; \
-  int16_t __ret_174; \
-  __ret_174 = vqrdmulhh_s16(__s0_174, vgetq_lane_s16(__s1_174, __p2_174)); \
-  __ret_174; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_175, __p1_175, __p2_175) __extension__ ({ \
-  int16_t __s0_175 = __p0_175; \
-  int16x8_t __s1_175 = __p1_175; \
-  int16x8_t __rev1_175;  __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_175; \
-  __ret_175 = vqrdmulhh_s16(__s0_175, __noswap_vgetq_lane_s16(__rev1_175, __p2_175)); \
-  __ret_175; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_176, __p1_176, __p2_176) __extension__ ({ \
-  uint16x4_t __s0_176 = __p0_176; \
-  uint32x4_t __s1_176 = __p1_176; \
-  uint16x8_t __ret_176; \
-  __ret_176 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_176), (uint16x4_t)(vqrshrn_n_u32(__s1_176, __p2_176)))); \
-  __ret_176; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_177, __p1_177, __p2_177) __extension__ ({ \
-  uint16x4_t __s0_177 = __p0_177; \
-  uint32x4_t __s1_177 = __p1_177; \
-  uint16x4_t __rev0_177;  __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 3, 2, 1, 0); \
-  uint32x4_t __rev1_177;  __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
-  uint16x8_t __ret_177; \
-  __ret_177 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_177), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_177, __p2_177)))); \
-  __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_177; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_178, __p1_178, __p2_178) __extension__ ({ \
-  uint32x2_t __s0_178 = __p0_178; \
-  uint64x2_t __s1_178 = __p1_178; \
-  uint32x4_t __ret_178; \
-  __ret_178 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_178), (uint32x2_t)(vqrshrn_n_u64(__s1_178, __p2_178)))); \
-  __ret_178; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_179, __p1_179, __p2_179) __extension__ ({ \
-  uint32x2_t __s0_179 = __p0_179; \
-  uint64x2_t __s1_179 = __p1_179; \
-  uint32x2_t __rev0_179;  __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 1, 0); \
-  uint64x2_t __rev1_179;  __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \
-  uint32x4_t __ret_179; \
-  __ret_179 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_179), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_179, __p2_179)))); \
-  __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \
-  __ret_179; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
-  uint8x8_t __s0_180 = __p0_180; \
-  uint16x8_t __s1_180 = __p1_180; \
-  uint8x16_t __ret_180; \
-  __ret_180 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_180), (uint8x8_t)(vqrshrn_n_u16(__s1_180, __p2_180)))); \
-  __ret_180; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
-  uint8x8_t __s0_181 = __p0_181; \
-  uint16x8_t __s1_181 = __p1_181; \
-  uint8x8_t __rev0_181;  __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_181;  __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_181; \
-  __ret_181 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_181), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_181, __p2_181)))); \
-  __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_181; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
-  int16x4_t __s0_182 = __p0_182; \
-  int32x4_t __s1_182 = __p1_182; \
-  int16x8_t __ret_182; \
-  __ret_182 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_182), (int16x4_t)(vqrshrn_n_s32(__s1_182, __p2_182)))); \
-  __ret_182; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
-  int16x4_t __s0_183 = __p0_183; \
-  int32x4_t __s1_183 = __p1_183; \
-  int16x4_t __rev0_183;  __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
-  int32x4_t __rev1_183;  __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
-  int16x8_t __ret_183; \
-  __ret_183 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_183), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_183, __p2_183)))); \
-  __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_183; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
-  int32x2_t __s0_184 = __p0_184; \
-  int64x2_t __s1_184 = __p1_184; \
-  int32x4_t __ret_184; \
-  __ret_184 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_184), (int32x2_t)(vqrshrn_n_s64(__s1_184, __p2_184)))); \
-  __ret_184; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
-  int32x2_t __s0_185 = __p0_185; \
-  int64x2_t __s1_185 = __p1_185; \
-  int32x2_t __rev0_185;  __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
-  int64x2_t __rev1_185;  __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
-  int32x4_t __ret_185; \
-  __ret_185 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_185), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_185, __p2_185)))); \
-  __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
-  __ret_185; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
-  int8x8_t __s0_186 = __p0_186; \
-  int16x8_t __s1_186 = __p1_186; \
-  int8x16_t __ret_186; \
-  __ret_186 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_186), (int8x8_t)(vqrshrn_n_s16(__s1_186, __p2_186)))); \
-  __ret_186; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
-  int8x8_t __s0_187 = __p0_187; \
-  int16x8_t __s1_187 = __p1_187; \
-  int8x8_t __rev0_187;  __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_187;  __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_187; \
-  __ret_187 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_187), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_187, __p2_187)))); \
-  __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_187; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
-  int16x4_t __s0_188 = __p0_188; \
-  int32x4_t __s1_188 = __p1_188; \
-  int16x8_t __ret_188; \
-  __ret_188 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_188), (int16x4_t)(vqrshrun_n_s32(__s1_188, __p2_188)))); \
-  __ret_188; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
-  int16x4_t __s0_189 = __p0_189; \
-  int32x4_t __s1_189 = __p1_189; \
-  int16x4_t __rev0_189;  __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
-  int32x4_t __rev1_189;  __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
-  int16x8_t __ret_189; \
-  __ret_189 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_189), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_189, __p2_189)))); \
-  __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_189; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
-  int32x2_t __s0_190 = __p0_190; \
-  int64x2_t __s1_190 = __p1_190; \
-  int32x4_t __ret_190; \
-  __ret_190 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_190), (int32x2_t)(vqrshrun_n_s64(__s1_190, __p2_190)))); \
-  __ret_190; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
-  int32x2_t __s0_191 = __p0_191; \
-  int64x2_t __s1_191 = __p1_191; \
-  int32x2_t __rev0_191;  __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
-  int64x2_t __rev1_191;  __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
-  int32x4_t __ret_191; \
-  __ret_191 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_191), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_191, __p2_191)))); \
-  __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
-  __ret_191; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
-  int8x8_t __s0_192 = __p0_192; \
-  int16x8_t __s1_192 = __p1_192; \
-  int8x16_t __ret_192; \
-  __ret_192 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_192), (int8x8_t)(vqrshrun_n_s16(__s1_192, __p2_192)))); \
-  __ret_192; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
-  int8x8_t __s0_193 = __p0_193; \
-  int16x8_t __s1_193 = __p1_193; \
-  int8x8_t __rev0_193;  __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_193;  __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_193; \
-  __ret_193 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_193), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_193, __p2_193)))); \
-  __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_193; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
-  uint16x4_t __s0_194 = __p0_194; \
-  uint32x4_t __s1_194 = __p1_194; \
-  uint16x8_t __ret_194; \
-  __ret_194 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_194), (uint16x4_t)(vqshrn_n_u32(__s1_194, __p2_194)))); \
-  __ret_194; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
-  uint16x4_t __s0_195 = __p0_195; \
-  uint32x4_t __s1_195 = __p1_195; \
-  uint16x4_t __rev0_195;  __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
-  uint32x4_t __rev1_195;  __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
-  uint16x8_t __ret_195; \
-  __ret_195 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_195), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_195, __p2_195)))); \
-  __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_195; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
-  uint32x2_t __s0_196 = __p0_196; \
-  uint64x2_t __s1_196 = __p1_196; \
-  uint32x4_t __ret_196; \
-  __ret_196 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_196), (uint32x2_t)(vqshrn_n_u64(__s1_196, __p2_196)))); \
-  __ret_196; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
-  uint32x2_t __s0_197 = __p0_197; \
-  uint64x2_t __s1_197 = __p1_197; \
-  uint32x2_t __rev0_197;  __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
-  uint64x2_t __rev1_197;  __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
-  uint32x4_t __ret_197; \
-  __ret_197 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_197), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_197, __p2_197)))); \
-  __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
-  __ret_197; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
-  uint8x8_t __s0_198 = __p0_198; \
-  uint16x8_t __s1_198 = __p1_198; \
-  uint8x16_t __ret_198; \
-  __ret_198 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_198), (uint8x8_t)(vqshrn_n_u16(__s1_198, __p2_198)))); \
-  __ret_198; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
-  uint8x8_t __s0_199 = __p0_199; \
-  uint16x8_t __s1_199 = __p1_199; \
-  uint8x8_t __rev0_199;  __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_199;  __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_199; \
-  __ret_199 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_199), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_199, __p2_199)))); \
-  __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_199; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
-  int16x4_t __s0_200 = __p0_200; \
-  int32x4_t __s1_200 = __p1_200; \
-  int16x8_t __ret_200; \
-  __ret_200 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_200), (int16x4_t)(vqshrn_n_s32(__s1_200, __p2_200)))); \
-  __ret_200; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
-  int16x4_t __s0_201 = __p0_201; \
-  int32x4_t __s1_201 = __p1_201; \
-  int16x4_t __rev0_201;  __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
-  int32x4_t __rev1_201;  __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
-  int16x8_t __ret_201; \
-  __ret_201 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_201), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_201, __p2_201)))); \
-  __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_201; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
-  int32x2_t __s0_202 = __p0_202; \
-  int64x2_t __s1_202 = __p1_202; \
-  int32x4_t __ret_202; \
-  __ret_202 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_202), (int32x2_t)(vqshrn_n_s64(__s1_202, __p2_202)))); \
-  __ret_202; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
-  int32x2_t __s0_203 = __p0_203; \
-  int64x2_t __s1_203 = __p1_203; \
-  int32x2_t __rev0_203;  __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
-  int64x2_t __rev1_203;  __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
-  int32x4_t __ret_203; \
-  __ret_203 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_203), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_203, __p2_203)))); \
-  __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
-  __ret_203; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
-  int8x8_t __s0_204 = __p0_204; \
-  int16x8_t __s1_204 = __p1_204; \
-  int8x16_t __ret_204; \
-  __ret_204 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_204), (int8x8_t)(vqshrn_n_s16(__s1_204, __p2_204)))); \
-  __ret_204; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
-  int8x8_t __s0_205 = __p0_205; \
-  int16x8_t __s1_205 = __p1_205; \
-  int8x8_t __rev0_205;  __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_205;  __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_205; \
-  __ret_205 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_205), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_205, __p2_205)))); \
-  __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_205; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
-  int16x4_t __s0_206 = __p0_206; \
-  int32x4_t __s1_206 = __p1_206; \
-  int16x8_t __ret_206; \
-  __ret_206 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_206), (int16x4_t)(vqshrun_n_s32(__s1_206, __p2_206)))); \
-  __ret_206; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
-  int16x4_t __s0_207 = __p0_207; \
-  int32x4_t __s1_207 = __p1_207; \
-  int16x4_t __rev0_207;  __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
-  int32x4_t __rev1_207;  __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
-  int16x8_t __ret_207; \
-  __ret_207 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_207), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_207, __p2_207)))); \
-  __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_207; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
-  int32x2_t __s0_208 = __p0_208; \
-  int64x2_t __s1_208 = __p1_208; \
-  int32x4_t __ret_208; \
-  __ret_208 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_208), (int32x2_t)(vqshrun_n_s64(__s1_208, __p2_208)))); \
-  __ret_208; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
-  int32x2_t __s0_209 = __p0_209; \
-  int64x2_t __s1_209 = __p1_209; \
-  int32x2_t __rev0_209;  __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
-  int64x2_t __rev1_209;  __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
-  int32x4_t __ret_209; \
-  __ret_209 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_209), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_209, __p2_209)))); \
-  __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
-  __ret_209; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
-  int8x8_t __s0_210 = __p0_210; \
-  int16x8_t __s1_210 = __p1_210; \
-  int8x16_t __ret_210; \
-  __ret_210 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_210), (int8x8_t)(vqshrun_n_s16(__s1_210, __p2_210)))); \
-  __ret_210; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
-  int8x8_t __s0_211 = __p0_211; \
-  int16x8_t __s1_211 = __p1_211; \
-  int8x8_t __rev0_211;  __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_211;  __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_211; \
-  __ret_211 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_211), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_211, __p2_211)))); \
-  __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_211; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
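/* Editor's annotation: the vqtbx[234]* family above wraps the AArch64 TBX
 * table-lookup-with-fallback instructions. Each byte of the index vector
 * selects a byte from the 2-, 3-, or 4-register table; an out-of-range index
 * keeps the corresponding byte of the first operand rather than zeroing it
 * (zeroing is what the vqtbl* variants do). An illustrative sketch, names
 * hypothetical: */
#include <arm_neon.h>
uint8x8_t tbx2_demo(void) {
  uint8x16x2_t table = { { vdupq_n_u8(0xAA), vdupq_n_u8(0xBB) } };
  uint8x8_t fallback = vdup_n_u8(0xFF);
  /* Indices 0..15 hit table.val[0], 16..31 hit table.val[1];
   * 200 is out of range, so that lane keeps 0xFF from 'fallback'. */
  uint8x8_t idx = { 0, 16, 200, 1, 17, 2, 18, 3 };
  return vqtbx2_u8(fallback, table, idx);  /* {AA,BB,FF,AA,BB,AA,BB,AA} */
}
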
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
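/* Editor's annotation: vraddhn_high_* above (and the vrsubhn_high_* group
 * further down) fuse a rounding add/subtract-and-narrow with vcombine: the
 * wide operands are combined, a rounding bias is added, the high half of each
 * element is kept, and that narrow result is concatenated onto an existing
 * low half. For u32 -> u16 each lane is (uint16_t)((a + b + (1u << 15)) >> 16).
 * A hedged sketch: */
#include <arm_neon.h>
uint16x8_t raddhn_high_demo(uint16x4_t low, uint32x4_t a, uint32x4_t b) {
  /* Per the little-endian definition, this equals
   * vcombine_u16(low, vraddhn_u32(a, b)) in a single intrinsic. */
  return vraddhn_high_u32(low, a, b);
}
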
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
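/* Editor's annotation: vrbit* above maps to the AArch64 RBIT instruction
 * applied per byte element -- it reverses the bit order inside each 8-bit
 * lane, not the order of the lanes themselves. Illustrative sketch: */
#include <arm_neon.h>
uint8x8_t rbit_demo(void) {
  uint8x8_t v = vdup_n_u8(0x01);   /* 0b00000001 in every lane */
  return vrbit_u8(v);              /* 0b10000000 = 0x80 in every lane */
}
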
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
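
/* Editor's annotation: vrecpe* yields a low-precision reciprocal estimate
 * and vrecps* computes the Newton-Raphson correction factor (2 - a*x), so
 * the usual idiom multiplies the two to refine the estimate; each step
 * roughly doubles the number of correct bits. A hedged sketch of that idiom
 * using the f64 intrinsics defined above: */
#include <arm_neon.h>
float64x2_t recip_newton_demo(float64x2_t a) {
  float64x2_t x = vrecpeq_f64(a);        /* rough estimate of 1/a        */
  x = vmulq_f64(x, vrecpsq_f64(a, x));   /* one Newton-Raphson refinement */
  x = vmulq_f64(x, vrecpsq_f64(a, x));   /* further steps improve precision */
  return x;
}
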
-__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
-  uint16x4_t __s0_212 = __p0_212; \
-  uint32x4_t __s1_212 = __p1_212; \
-  uint16x8_t __ret_212; \
-  __ret_212 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_212), (uint16x4_t)(vrshrn_n_u32(__s1_212, __p2_212)))); \
-  __ret_212; \
-})
-#else
-#define vrshrn_high_n_u32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
-  uint16x4_t __s0_213 = __p0_213; \
-  uint32x4_t __s1_213 = __p1_213; \
-  uint16x4_t __rev0_213;  __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
-  uint32x4_t __rev1_213;  __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
-  uint16x8_t __ret_213; \
-  __ret_213 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_213), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_213, __p2_213)))); \
-  __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_213; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
-  uint32x2_t __s0_214 = __p0_214; \
-  uint64x2_t __s1_214 = __p1_214; \
-  uint32x4_t __ret_214; \
-  __ret_214 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_214), (uint32x2_t)(vrshrn_n_u64(__s1_214, __p2_214)))); \
-  __ret_214; \
-})
-#else
-#define vrshrn_high_n_u64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
-  uint32x2_t __s0_215 = __p0_215; \
-  uint64x2_t __s1_215 = __p1_215; \
-  uint32x2_t __rev0_215;  __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
-  uint64x2_t __rev1_215;  __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
-  uint32x4_t __ret_215; \
-  __ret_215 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_215), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_215, __p2_215)))); \
-  __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
-  __ret_215; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
-  uint8x8_t __s0_216 = __p0_216; \
-  uint16x8_t __s1_216 = __p1_216; \
-  uint8x16_t __ret_216; \
-  __ret_216 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_216), (uint8x8_t)(vrshrn_n_u16(__s1_216, __p2_216)))); \
-  __ret_216; \
-})
-#else
-#define vrshrn_high_n_u16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
-  uint8x8_t __s0_217 = __p0_217; \
-  uint16x8_t __s1_217 = __p1_217; \
-  uint8x8_t __rev0_217;  __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_217;  __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_217; \
-  __ret_217 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_217), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_217, __p2_217)))); \
-  __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_217; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
-  int16x4_t __s0_218 = __p0_218; \
-  int32x4_t __s1_218 = __p1_218; \
-  int16x8_t __ret_218; \
-  __ret_218 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_218), (int16x4_t)(vrshrn_n_s32(__s1_218, __p2_218)))); \
-  __ret_218; \
-})
-#else
-#define vrshrn_high_n_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
-  int16x4_t __s0_219 = __p0_219; \
-  int32x4_t __s1_219 = __p1_219; \
-  int16x4_t __rev0_219;  __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 3, 2, 1, 0); \
-  int32x4_t __rev1_219;  __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 3, 2, 1, 0); \
-  int16x8_t __ret_219; \
-  __ret_219 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_219), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_219, __p2_219)))); \
-  __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_219; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_220, __p1_220, __p2_220) __extension__ ({ \
-  int32x2_t __s0_220 = __p0_220; \
-  int64x2_t __s1_220 = __p1_220; \
-  int32x4_t __ret_220; \
-  __ret_220 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_220), (int32x2_t)(vrshrn_n_s64(__s1_220, __p2_220)))); \
-  __ret_220; \
-})
-#else
-#define vrshrn_high_n_s64(__p0_221, __p1_221, __p2_221) __extension__ ({ \
-  int32x2_t __s0_221 = __p0_221; \
-  int64x2_t __s1_221 = __p1_221; \
-  int32x2_t __rev0_221;  __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 1, 0); \
-  int64x2_t __rev1_221;  __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 1, 0); \
-  int32x4_t __ret_221; \
-  __ret_221 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_221), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_221, __p2_221)))); \
-  __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
-  __ret_221; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
-  int8x8_t __s0_222 = __p0_222; \
-  int16x8_t __s1_222 = __p1_222; \
-  int8x16_t __ret_222; \
-  __ret_222 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_222), (int8x8_t)(vrshrn_n_s16(__s1_222, __p2_222)))); \
-  __ret_222; \
-})
-#else
-#define vrshrn_high_n_s16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
-  int8x8_t __s0_223 = __p0_223; \
-  int16x8_t __s1_223 = __p1_223; \
-  int8x8_t __rev0_223;  __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_223;  __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_223; \
-  __ret_223 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_223), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_223, __p2_223)))); \
-  __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_223; \
-})
-#endif
-
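/* Editor's annotation: the vrshrn_high_n_* macros above (and the
 * vshrn_high_n_* group further down, which differs only in omitting the
 * rounding bias) shift each wide element right by an immediate, narrow it to
 * half width, and write the result into the high half of the destination
 * while copying the first operand into the low half (the RSHRN2 form). The
 * shift must be a compile-time constant, which is why these are macros
 * rather than inline functions. Sketch: */
#include <arm_neon.h>
uint16x8_t rshrn_high_demo(uint16x4_t low, uint32x4_t wide) {
  /* Each 32-bit lane becomes (uint16_t)((x + (1u << 7)) >> 8). */
  return vrshrn_high_n_u32(low, wide, 8);
}
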
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrsqrted_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrsqrtes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
-  return __ret;
-}
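
/* Editor's annotation: mirroring the reciprocal pair above, vrsqrte* is a
 * rough estimate of 1/sqrt(a) and vrsqrts* returns the correction factor
 * (3 - a*b)/2, so one Newton-Raphson step multiplies the estimate by
 * vrsqrts(a*x, x). Illustrative sketch using the f64 forms defined above: */
#include <arm_neon.h>
float64x2_t rsqrt_newton_demo(float64x2_t a) {
  float64x2_t x = vrsqrteq_f64(a);                     /* ~1/sqrt(a)  */
  x = vmulq_f64(x, vrsqrtsq_f64(vmulq_f64(a, x), x));  /* refine once */
  return x;
}
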
-#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
-  __ret; \
-})
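
/* Editor's annotation: the vset_lane/vsetq_lane macros above insert a scalar
 * into one lane of a vector. The lane number becomes an immediate in the
 * generated instruction, so it must be an integer constant expression --
 * hence the macro form, and the shufflevector fix-ups in the big-endian
 * branches. Sketch: */
#include <arm_neon.h>
float64x2_t set_lane_demo(float64x2_t v) {
  /* Replace lane 1 with 3.5; the lane index cannot be a runtime variable. */
  return vsetq_lane_f64(3.5, v, 1);
}
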
-__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_224, __p1_224) __extension__ ({ \
-  uint8x16_t __s0_224 = __p0_224; \
-  uint16x8_t __ret_224; \
-  __ret_224 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_224), __p1_224)); \
-  __ret_224; \
-})
-#else
-#define vshll_high_n_u8(__p0_225, __p1_225) __extension__ ({ \
-  uint8x16_t __s0_225 = __p0_225; \
-  uint8x16_t __rev0_225;  __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_225; \
-  __ret_225 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_225), __p1_225)); \
-  __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_225; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_226, __p1_226) __extension__ ({ \
-  uint32x4_t __s0_226 = __p0_226; \
-  uint64x2_t __ret_226; \
-  __ret_226 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_226), __p1_226)); \
-  __ret_226; \
-})
-#else
-#define vshll_high_n_u32(__p0_227, __p1_227) __extension__ ({ \
-  uint32x4_t __s0_227 = __p0_227; \
-  uint32x4_t __rev0_227;  __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 3, 2, 1, 0); \
-  uint64x2_t __ret_227; \
-  __ret_227 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_227), __p1_227)); \
-  __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \
-  __ret_227; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_228, __p1_228) __extension__ ({ \
-  uint16x8_t __s0_228 = __p0_228; \
-  uint32x4_t __ret_228; \
-  __ret_228 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_228), __p1_228)); \
-  __ret_228; \
-})
-#else
-#define vshll_high_n_u16(__p0_229, __p1_229) __extension__ ({ \
-  uint16x8_t __s0_229 = __p0_229; \
-  uint16x8_t __rev0_229;  __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_229; \
-  __ret_229 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_229), __p1_229)); \
-  __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \
-  __ret_229; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_230, __p1_230) __extension__ ({ \
-  int8x16_t __s0_230 = __p0_230; \
-  int16x8_t __ret_230; \
-  __ret_230 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_230), __p1_230)); \
-  __ret_230; \
-})
-#else
-#define vshll_high_n_s8(__p0_231, __p1_231) __extension__ ({ \
-  int8x16_t __s0_231 = __p0_231; \
-  int8x16_t __rev0_231;  __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_231; \
-  __ret_231 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_231), __p1_231)); \
-  __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_231; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_232, __p1_232) __extension__ ({ \
-  int32x4_t __s0_232 = __p0_232; \
-  int64x2_t __ret_232; \
-  __ret_232 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_232), __p1_232)); \
-  __ret_232; \
-})
-#else
-#define vshll_high_n_s32(__p0_233, __p1_233) __extension__ ({ \
-  int32x4_t __s0_233 = __p0_233; \
-  int32x4_t __rev0_233;  __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \
-  int64x2_t __ret_233; \
-  __ret_233 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_233), __p1_233)); \
-  __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 1, 0); \
-  __ret_233; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_234, __p1_234) __extension__ ({ \
-  int16x8_t __s0_234 = __p0_234; \
-  int32x4_t __ret_234; \
-  __ret_234 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_234), __p1_234)); \
-  __ret_234; \
-})
-#else
-#define vshll_high_n_s16(__p0_235, __p1_235) __extension__ ({ \
-  int16x8_t __s0_235 = __p0_235; \
-  int16x8_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_235; \
-  __ret_235 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_235), __p1_235)); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 3, 2, 1, 0); \
-  __ret_235; \
-})
-#endif
-
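/* Editor's annotation: per the definitions above, vshll_high_n_* is
 * vshll_n applied to vget_high -- widen the upper half of the vector, then
 * shift left by an immediate (the [U]SHLL2 form). A common use is promoting
 * u8 pixel data to u16 with a scale in one step. Sketch: */
#include <arm_neon.h>
uint16x8_t shll_high_demo(uint8x16_t pixels) {
  /* Take lanes 8..15, zero-extend each to 16 bits, shift left by 4. */
  return vshll_high_n_u8(pixels, 4);
}
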
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_236, __p1_236, __p2_236) __extension__ ({ \
-  uint16x4_t __s0_236 = __p0_236; \
-  uint32x4_t __s1_236 = __p1_236; \
-  uint16x8_t __ret_236; \
-  __ret_236 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_236), (uint16x4_t)(vshrn_n_u32(__s1_236, __p2_236)))); \
-  __ret_236; \
-})
-#else
-#define vshrn_high_n_u32(__p0_237, __p1_237, __p2_237) __extension__ ({ \
-  uint16x4_t __s0_237 = __p0_237; \
-  uint32x4_t __s1_237 = __p1_237; \
-  uint16x4_t __rev0_237;  __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \
-  uint32x4_t __rev1_237;  __rev1_237 = __builtin_shufflevector(__s1_237, __s1_237, 3, 2, 1, 0); \
-  uint16x8_t __ret_237; \
-  __ret_237 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_237), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_237, __p2_237)))); \
-  __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_237; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_238, __p1_238, __p2_238) __extension__ ({ \
-  uint32x2_t __s0_238 = __p0_238; \
-  uint64x2_t __s1_238 = __p1_238; \
-  uint32x4_t __ret_238; \
-  __ret_238 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_238), (uint32x2_t)(vshrn_n_u64(__s1_238, __p2_238)))); \
-  __ret_238; \
-})
-#else
-#define vshrn_high_n_u64(__p0_239, __p1_239, __p2_239) __extension__ ({ \
-  uint32x2_t __s0_239 = __p0_239; \
-  uint64x2_t __s1_239 = __p1_239; \
-  uint32x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  uint64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  uint32x4_t __ret_239; \
-  __ret_239 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_239), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_239, __p2_239)))); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \
-  __ret_239; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_240, __p1_240, __p2_240) __extension__ ({ \
-  uint8x8_t __s0_240 = __p0_240; \
-  uint16x8_t __s1_240 = __p1_240; \
-  uint8x16_t __ret_240; \
-  __ret_240 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_240), (uint8x8_t)(vshrn_n_u16(__s1_240, __p2_240)))); \
-  __ret_240; \
-})
-#else
-#define vshrn_high_n_u16(__p0_241, __p1_241, __p2_241) __extension__ ({ \
-  uint8x8_t __s0_241 = __p0_241; \
-  uint16x8_t __s1_241 = __p1_241; \
-  uint8x8_t __rev0_241;  __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_241;  __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_241; \
-  __ret_241 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_241), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_241, __p2_241)))); \
-  __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_241; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \
-  int16x4_t __s0_242 = __p0_242; \
-  int32x4_t __s1_242 = __p1_242; \
-  int16x8_t __ret_242; \
-  __ret_242 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_242), (int16x4_t)(vshrn_n_s32(__s1_242, __p2_242)))); \
-  __ret_242; \
-})
-#else
-#define vshrn_high_n_s32(__p0_243, __p1_243, __p2_243) __extension__ ({ \
-  int16x4_t __s0_243 = __p0_243; \
-  int32x4_t __s1_243 = __p1_243; \
-  int16x4_t __rev0_243;  __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
-  int32x4_t __rev1_243;  __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 3, 2, 1, 0); \
-  int16x8_t __ret_243; \
-  __ret_243 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_243), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_243, __p2_243)))); \
-  __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_243; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_244, __p1_244, __p2_244) __extension__ ({ \
-  int32x2_t __s0_244 = __p0_244; \
-  int64x2_t __s1_244 = __p1_244; \
-  int32x4_t __ret_244; \
-  __ret_244 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_244), (int32x2_t)(vshrn_n_s64(__s1_244, __p2_244)))); \
-  __ret_244; \
-})
-#else
-#define vshrn_high_n_s64(__p0_245, __p1_245, __p2_245) __extension__ ({ \
-  int32x2_t __s0_245 = __p0_245; \
-  int64x2_t __s1_245 = __p1_245; \
-  int32x2_t __rev0_245;  __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \
-  int64x2_t __rev1_245;  __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 1, 0); \
-  int32x4_t __ret_245; \
-  __ret_245 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_245), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_245, __p2_245)))); \
-  __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
-  __ret_245; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
-  int8x8_t __s0_246 = __p0_246; \
-  int16x8_t __s1_246 = __p1_246; \
-  int8x16_t __ret_246; \
-  __ret_246 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_246), (int8x8_t)(vshrn_n_s16(__s1_246, __p2_246)))); \
-  __ret_246; \
-})
-#else
-#define vshrn_high_n_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
-  int8x8_t __s0_247 = __p0_247; \
-  int16x8_t __s1_247 = __p1_247; \
-  int8x8_t __rev0_247;  __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_247;  __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_247; \
-  __ret_247 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_247), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_247, __p2_247)))); \
-  __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_247; \
-})
-#endif
-
-#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
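
/* Editor's annotation: vslid_n_* above is the scalar shift-left-and-insert
 * (SLI): the second operand is shifted left by the immediate and written
 * into the first, but the low bits of the destination below the shift amount
 * are preserved rather than zeroed. Equivalent arithmetic, as a sketch: */
#include <arm_neon.h>
#include <stdint.h>
uint64_t sli_demo(uint64_t dst, uint64_t src) {
  /* Same result as (dst & 0xFF) | (src << 8) for a shift of 8. */
  return vslid_n_u64(dst, src, 8);
}
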
-#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vst1_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
-})
-#else
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
-})
-#else
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
-})
-#endif
-
-#define vst1_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
-})
-#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-})
-#else
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-})
-#else
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-})
-#endif
-
-#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-})
-#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst2_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#define vst2_f64(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-})
-#else
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-})
-#else
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-})
-#else
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-})
-#else
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-})
-#else
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-})
-#else
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-})
-#else
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
-})
-#endif
-
-#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-})
-#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-})
-#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-})
-#define vst3_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#define vst3_f64(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-})
-#else
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-})
-#else
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-})
-#else
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-})
-#else
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-})
-#else
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-})
-#else
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-})
-#else
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
-})
-#endif
-
-#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-})
-#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-})
-#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-})
-#define vst4_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#define vst4_f64(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-})
-#else
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-})
-#else
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-})
-#else
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-})
-#else
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-})
-#else
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-})
-#else
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-})
-#else
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
-})
-#endif
-
-#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-})
-#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-})
-#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-})
-#define vstrq_p128(__p0, __p1) __extension__ ({ \
-  poly128_t __s1 = __p1; \
-  __builtin_neon_vstrq_p128(__p0, __s1); \
-})
-__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + vabdq_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdq_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdq_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + vabdq_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdq_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdq_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + vabd_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + vabd_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + vabd_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + vabd_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + vabd_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + vabd_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
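-/* vget_lane_f16: there is no direct f16 lane-read builtin here, so the
- * vector is reinterpreted as int16 lanes, the lane is extracted with
- * vget_lane_s16, and the resulting bits are reinterpreted as float16_t. */
-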
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_248, __p1_248) __extension__ ({ \
-  float16x4_t __s0_248 = __p0_248; \
-  float16_t __ret_248; \
-  float16x4_t __reint_248 = __s0_248; \
-  int16_t __reint1_248 = vget_lane_s16(*(int16x4_t *) &__reint_248, __p1_248); \
-  __ret_248 = *(float16_t *) &__reint1_248; \
-  __ret_248; \
-})
-#else
-#define vget_lane_f16(__p0_249, __p1_249) __extension__ ({ \
-  float16x4_t __s0_249 = __p0_249; \
-  float16x4_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 3, 2, 1, 0); \
-  float16_t __ret_249; \
-  float16x4_t __reint_249 = __rev0_249; \
-  int16_t __reint1_249 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_249, __p1_249); \
-  __ret_249 = *(float16_t *) &__reint1_249; \
-  __ret_249; \
-})
-#define __noswap_vget_lane_f16(__p0_250, __p1_250) __extension__ ({ \
-  float16x4_t __s0_250 = __p0_250; \
-  float16_t __ret_250; \
-  float16x4_t __reint_250 = __s0_250; \
-  int16_t __reint1_250 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_250, __p1_250); \
-  __ret_250 = *(float16_t *) &__reint1_250; \
-  __ret_250; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_251, __p1_251) __extension__ ({ \
-  float16x8_t __s0_251 = __p0_251; \
-  float16_t __ret_251; \
-  float16x8_t __reint_251 = __s0_251; \
-  int16_t __reint1_251 = vgetq_lane_s16(*(int16x8_t *) &__reint_251, __p1_251); \
-  __ret_251 = *(float16_t *) &__reint1_251; \
-  __ret_251; \
-})
-#else
-#define vgetq_lane_f16(__p0_252, __p1_252) __extension__ ({ \
-  float16x8_t __s0_252 = __p0_252; \
-  float16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_252; \
-  float16x8_t __reint_252 = __rev0_252; \
-  int16_t __reint1_252 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_252, __p1_252); \
-  __ret_252 = *(float16_t *) &__reint1_252; \
-  __ret_252; \
-})
-#define __noswap_vgetq_lane_f16(__p0_253, __p1_253) __extension__ ({ \
-  float16x8_t __s0_253 = __p0_253; \
-  float16_t __ret_253; \
-  float16x8_t __reint_253 = __s0_253; \
-  int16_t __reint1_253 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_253, __p1_253); \
-  __ret_253 = *(float16_t *) &__reint1_253; \
-  __ret_253; \
-})
-#endif
-
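-/* vmlal_* (multiply-accumulate long): vmull_* forms the double-width product
- * of the two narrow operands, which is then added to the wide accumulator.
- * The _lane variants splat a chosen lane of the last operand before the
- * multiply; the _n variants splat a scalar the same way. */
-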
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
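-/* vmlsl_* (multiply-subtract long): same structure as vmlal_*, except the
- * double-width product is subtracted from the wide accumulator. */
-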
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
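-/* vset_lane_f16: the inverse of vget_lane_f16, inserting one f16 lane via
- * the same bit-reinterpretation through vset_lane_s16. */
-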
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_254, __p1_254, __p2_254) __extension__ ({ \
-  float16_t __s0_254 = __p0_254; \
-  float16x4_t __s1_254 = __p1_254; \
-  float16x4_t __ret_254; \
-  float16_t __reint_254 = __s0_254; \
-  float16x4_t __reint1_254 = __s1_254; \
-  int16x4_t __reint2_254 = vset_lane_s16(*(int16_t *) &__reint_254, *(int16x4_t *) &__reint1_254, __p2_254); \
-  __ret_254 = *(float16x4_t *) &__reint2_254; \
-  __ret_254; \
-})
-#else
-#define vset_lane_f16(__p0_255, __p1_255, __p2_255) __extension__ ({ \
-  float16_t __s0_255 = __p0_255; \
-  float16x4_t __s1_255 = __p1_255; \
-  float16x4_t __rev1_255;  __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 3, 2, 1, 0); \
-  float16x4_t __ret_255; \
-  float16_t __reint_255 = __s0_255; \
-  float16x4_t __reint1_255 = __rev1_255; \
-  int16x4_t __reint2_255 = __noswap_vset_lane_s16(*(int16_t *) &__reint_255, *(int16x4_t *) &__reint1_255, __p2_255); \
-  __ret_255 = *(float16x4_t *) &__reint2_255; \
-  __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 3, 2, 1, 0); \
-  __ret_255; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_256, __p1_256, __p2_256) __extension__ ({ \
-  float16_t __s0_256 = __p0_256; \
-  float16x8_t __s1_256 = __p1_256; \
-  float16x8_t __ret_256; \
-  float16_t __reint_256 = __s0_256; \
-  float16x8_t __reint1_256 = __s1_256; \
-  int16x8_t __reint2_256 = vsetq_lane_s16(*(int16_t *) &__reint_256, *(int16x8_t *) &__reint1_256, __p2_256); \
-  __ret_256 = *(float16x8_t *) &__reint2_256; \
-  __ret_256; \
-})
-#else
-#define vsetq_lane_f16(__p0_257, __p1_257, __p2_257) __extension__ ({ \
-  float16_t __s0_257 = __p0_257; \
-  float16x8_t __s1_257 = __p1_257; \
-  float16x8_t __rev1_257;  __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_257; \
-  float16_t __reint_257 = __s0_257; \
-  float16x8_t __reint1_257 = __rev1_257; \
-  int16x8_t __reint2_257 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_257, *(int16x8_t *) &__reint1_257, __p2_257); \
-  __ret_257 = *(float16x8_t *) &__reint2_257; \
-  __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_257; \
-})
-#endif
-
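-/* __ARM_FEATURE_FP16FML: the vfmlal (fused multiply-add long) and vfmlsl
- * (fused multiply-subtract long) lane variants splat one f16 lane of the
- * last operand into a full vector and pass it to the non-lane form, which
- * accumulates the widened f16 products into a float32 accumulator. */
-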
-#if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float32x4_t __s0_258 = __p0_258; \
-  float16x8_t __s1_258 = __p1_258; \
-  float16x4_t __s2_258 = __p2_258; \
-  float32x4_t __ret_258; \
-  __ret_258 = vfmlalq_high_f16(__s0_258, __s1_258, (float16x8_t) {vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258)}); \
-  __ret_258; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float32x4_t __s0_259 = __p0_259; \
-  float16x8_t __s1_259 = __p1_259; \
-  float16x4_t __s2_259 = __p2_259; \
-  float32x4_t __rev0_259;  __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 3, 2, 1, 0); \
-  float16x8_t __rev1_259;  __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_259;  __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 3, 2, 1, 0); \
-  float32x4_t __ret_259; \
-  __ret_259 = __noswap_vfmlalq_high_f16(__rev0_259, __rev1_259, (float16x8_t) {__noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259)}); \
-  __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 3, 2, 1, 0); \
-  __ret_259; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float32x2_t __s0_260 = __p0_260; \
-  float16x4_t __s1_260 = __p1_260; \
-  float16x4_t __s2_260 = __p2_260; \
-  float32x2_t __ret_260; \
-  __ret_260 = vfmlal_high_f16(__s0_260, __s1_260, (float16x4_t) {vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260)}); \
-  __ret_260; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  float32x2_t __s0_261 = __p0_261; \
-  float16x4_t __s1_261 = __p1_261; \
-  float16x4_t __s2_261 = __p2_261; \
-  float32x2_t __rev0_261;  __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 1, 0); \
-  float16x4_t __rev1_261;  __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 3, 2, 1, 0); \
-  float16x4_t __rev2_261;  __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 3, 2, 1, 0); \
-  float32x2_t __ret_261; \
-  __ret_261 = __noswap_vfmlal_high_f16(__rev0_261, __rev1_261, (float16x4_t) {__noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261)}); \
-  __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 1, 0); \
-  __ret_261; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  float32x4_t __s0_262 = __p0_262; \
-  float16x8_t __s1_262 = __p1_262; \
-  float16x4_t __s2_262 = __p2_262; \
-  float32x4_t __ret_262; \
-  __ret_262 = vfmlalq_low_f16(__s0_262, __s1_262, (float16x8_t) {vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262)}); \
-  __ret_262; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  float32x4_t __s0_263 = __p0_263; \
-  float16x8_t __s1_263 = __p1_263; \
-  float16x4_t __s2_263 = __p2_263; \
-  float32x4_t __rev0_263;  __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 3, 2, 1, 0); \
-  float16x8_t __rev1_263;  __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_263;  __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 3, 2, 1, 0); \
-  float32x4_t __ret_263; \
-  __ret_263 = __noswap_vfmlalq_low_f16(__rev0_263, __rev1_263, (float16x8_t) {__noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263)}); \
-  __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 3, 2, 1, 0); \
-  __ret_263; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  float32x2_t __s0_264 = __p0_264; \
-  float16x4_t __s1_264 = __p1_264; \
-  float16x4_t __s2_264 = __p2_264; \
-  float32x2_t __ret_264; \
-  __ret_264 = vfmlal_low_f16(__s0_264, __s1_264, (float16x4_t) {vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264)}); \
-  __ret_264; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  float32x2_t __s0_265 = __p0_265; \
-  float16x4_t __s1_265 = __p1_265; \
-  float16x4_t __s2_265 = __p2_265; \
-  float32x2_t __rev0_265;  __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 1, 0); \
-  float16x4_t __rev1_265;  __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \
-  float16x4_t __rev2_265;  __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 3, 2, 1, 0); \
-  float32x2_t __ret_265; \
-  __ret_265 = __noswap_vfmlal_low_f16(__rev0_265, __rev1_265, (float16x4_t) {__noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265)}); \
-  __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 1, 0); \
-  __ret_265; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  float32x4_t __s0_266 = __p0_266; \
-  float16x8_t __s1_266 = __p1_266; \
-  float16x8_t __s2_266 = __p2_266; \
-  float32x4_t __ret_266; \
-  __ret_266 = vfmlalq_high_f16(__s0_266, __s1_266, (float16x8_t) {vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266)}); \
-  __ret_266; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  float32x4_t __s0_267 = __p0_267; \
-  float16x8_t __s1_267 = __p1_267; \
-  float16x8_t __s2_267 = __p2_267; \
-  float32x4_t __rev0_267;  __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 3, 2, 1, 0); \
-  float16x8_t __rev1_267;  __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_267;  __rev2_267 = __builtin_shufflevector(__s2_267, __s2_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_267; \
-  __ret_267 = __noswap_vfmlalq_high_f16(__rev0_267, __rev1_267, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267)}); \
-  __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 3, 2, 1, 0); \
-  __ret_267; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  float32x2_t __s0_268 = __p0_268; \
-  float16x4_t __s1_268 = __p1_268; \
-  float16x8_t __s2_268 = __p2_268; \
-  float32x2_t __ret_268; \
-  __ret_268 = vfmlal_high_f16(__s0_268, __s1_268, (float16x4_t) {vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268)}); \
-  __ret_268; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  float32x2_t __s0_269 = __p0_269; \
-  float16x4_t __s1_269 = __p1_269; \
-  float16x8_t __s2_269 = __p2_269; \
-  float32x2_t __rev0_269;  __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 1, 0); \
-  float16x4_t __rev1_269;  __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 3, 2, 1, 0); \
-  float16x8_t __rev2_269;  __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_269; \
-  __ret_269 = __noswap_vfmlal_high_f16(__rev0_269, __rev1_269, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269)}); \
-  __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 1, 0); \
-  __ret_269; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  float32x4_t __s0_270 = __p0_270; \
-  float16x8_t __s1_270 = __p1_270; \
-  float16x8_t __s2_270 = __p2_270; \
-  float32x4_t __ret_270; \
-  __ret_270 = vfmlalq_low_f16(__s0_270, __s1_270, (float16x8_t) {vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270)}); \
-  __ret_270; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  float32x4_t __s0_271 = __p0_271; \
-  float16x8_t __s1_271 = __p1_271; \
-  float16x8_t __s2_271 = __p2_271; \
-  float32x4_t __rev0_271;  __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \
-  float16x8_t __rev1_271;  __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_271;  __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_271; \
-  __ret_271 = __noswap_vfmlalq_low_f16(__rev0_271, __rev1_271, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271)}); \
-  __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \
-  __ret_271; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  float32x2_t __s0_272 = __p0_272; \
-  float16x4_t __s1_272 = __p1_272; \
-  float16x8_t __s2_272 = __p2_272; \
-  float32x2_t __ret_272; \
-  __ret_272 = vfmlal_low_f16(__s0_272, __s1_272, (float16x4_t) {vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272)}); \
-  __ret_272; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  float32x2_t __s0_273 = __p0_273; \
-  float16x4_t __s1_273 = __p1_273; \
-  float16x8_t __s2_273 = __p2_273; \
-  float32x2_t __rev0_273;  __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \
-  float16x4_t __rev1_273;  __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 3, 2, 1, 0); \
-  float16x8_t __rev2_273;  __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_273; \
-  __ret_273 = __noswap_vfmlal_low_f16(__rev0_273, __rev1_273, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273)}); \
-  __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \
-  __ret_273; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  float32x4_t __s0_274 = __p0_274; \
-  float16x8_t __s1_274 = __p1_274; \
-  float16x4_t __s2_274 = __p2_274; \
-  float32x4_t __ret_274; \
-  __ret_274 = vfmlslq_high_f16(__s0_274, __s1_274, (float16x8_t) {vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274)}); \
-  __ret_274; \
-})
-#else
-#define vfmlslq_lane_high_f16(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  float32x4_t __s0_275 = __p0_275; \
-  float16x8_t __s1_275 = __p1_275; \
-  float16x4_t __s2_275 = __p2_275; \
-  float32x4_t __rev0_275;  __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \
-  float16x8_t __rev1_275;  __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_275;  __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 3, 2, 1, 0); \
-  float32x4_t __ret_275; \
-  __ret_275 = __noswap_vfmlslq_high_f16(__rev0_275, __rev1_275, (float16x8_t) {__noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275)}); \
-  __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \
-  __ret_275; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  float32x2_t __s0_276 = __p0_276; \
-  float16x4_t __s1_276 = __p1_276; \
-  float16x4_t __s2_276 = __p2_276; \
-  float32x2_t __ret_276; \
-  __ret_276 = vfmlsl_high_f16(__s0_276, __s1_276, (float16x4_t) {vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276)}); \
-  __ret_276; \
-})
-#else
-#define vfmlsl_lane_high_f16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
-  float32x2_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x4_t __s2_277 = __p2_277; \
-  float32x2_t __rev0_277;  __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \
-  float16x4_t __rev1_277;  __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 3, 2, 1, 0); \
-  float16x4_t __rev2_277;  __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 3, 2, 1, 0); \
-  float32x2_t __ret_277; \
-  __ret_277 = __noswap_vfmlsl_high_f16(__rev0_277, __rev1_277, (float16x4_t) {__noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277)}); \
-  __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \
-  __ret_277; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
-  float32x4_t __s0_278 = __p0_278; \
-  float16x8_t __s1_278 = __p1_278; \
-  float16x4_t __s2_278 = __p2_278; \
-  float32x4_t __ret_278; \
-  __ret_278 = vfmlslq_low_f16(__s0_278, __s1_278, (float16x8_t) {vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278)}); \
-  __ret_278; \
-})
-#else
-#define vfmlslq_lane_low_f16(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
-  float32x4_t __s0_279 = __p0_279; \
-  float16x8_t __s1_279 = __p1_279; \
-  float16x4_t __s2_279 = __p2_279; \
-  float32x4_t __rev0_279;  __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 3, 2, 1, 0); \
-  float16x8_t __rev1_279;  __rev1_279 = __builtin_shufflevector(__s1_279, __s1_279, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_279;  __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 3, 2, 1, 0); \
-  float32x4_t __ret_279; \
-  __ret_279 = __noswap_vfmlslq_low_f16(__rev0_279, __rev1_279, (float16x8_t) {__noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279)}); \
-  __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 3, 2, 1, 0); \
-  __ret_279; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
-  float32x2_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __s2_280 = __p2_280; \
-  float32x2_t __ret_280; \
-  __ret_280 = vfmlsl_low_f16(__s0_280, __s1_280, (float16x4_t) {vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280)}); \
-  __ret_280; \
-})
-#else
-#define vfmlsl_lane_low_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float32x2_t __s0_281 = __p0_281; \
-  float16x4_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float32x2_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 1, 0); \
-  float16x4_t __rev1_281;  __rev1_281 = __builtin_shufflevector(__s1_281, __s1_281, 3, 2, 1, 0); \
-  float16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
-  float32x2_t __ret_281; \
-  __ret_281 = __noswap_vfmlsl_low_f16(__rev0_281, __rev1_281, (float16x4_t) {__noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281)}); \
-  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 1, 0); \
-  __ret_281; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float32x4_t __s0_282 = __p0_282; \
-  float16x8_t __s1_282 = __p1_282; \
-  float16x8_t __s2_282 = __p2_282; \
-  float32x4_t __ret_282; \
-  __ret_282 = vfmlslq_high_f16(__s0_282, __s1_282, (float16x8_t) {vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282)}); \
-  __ret_282; \
-})
-#else
-#define vfmlslq_laneq_high_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float32x4_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x8_t __s2_283 = __p2_283; \
-  float32x4_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 3, 2, 1, 0); \
-  float16x8_t __rev1_283;  __rev1_283 = __builtin_shufflevector(__s1_283, __s1_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_283; \
-  __ret_283 = __noswap_vfmlslq_high_f16(__rev0_283, __rev1_283, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283)}); \
-  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 3, 2, 1, 0); \
-  __ret_283; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float32x2_t __s0_284 = __p0_284; \
-  float16x4_t __s1_284 = __p1_284; \
-  float16x8_t __s2_284 = __p2_284; \
-  float32x2_t __ret_284; \
-  __ret_284 = vfmlsl_high_f16(__s0_284, __s1_284, (float16x4_t) {vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284)}); \
-  __ret_284; \
-})
-#else
-#define vfmlsl_laneq_high_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float32x2_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x8_t __s2_285 = __p2_285; \
-  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
-  float16x4_t __rev1_285;  __rev1_285 = __builtin_shufflevector(__s1_285, __s1_285, 3, 2, 1, 0); \
-  float16x8_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_285; \
-  __ret_285 = __noswap_vfmlsl_high_f16(__rev0_285, __rev1_285, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285)}); \
-  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
-  __ret_285; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float32x4_t __s0_286 = __p0_286; \
-  float16x8_t __s1_286 = __p1_286; \
-  float16x8_t __s2_286 = __p2_286; \
-  float32x4_t __ret_286; \
-  __ret_286 = vfmlslq_low_f16(__s0_286, __s1_286, (float16x8_t) {vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286)}); \
-  __ret_286; \
-})
-#else
-#define vfmlslq_laneq_low_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float32x4_t __s0_287 = __p0_287; \
-  float16x8_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float32x4_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 3, 2, 1, 0); \
-  float16x8_t __rev1_287;  __rev1_287 = __builtin_shufflevector(__s1_287, __s1_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_287; \
-  __ret_287 = __noswap_vfmlslq_low_f16(__rev0_287, __rev1_287, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287)}); \
-  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 3, 2, 1, 0); \
-  __ret_287; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float32x2_t __s0_288 = __p0_288; \
-  float16x4_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float32x2_t __ret_288; \
-  __ret_288 = vfmlsl_low_f16(__s0_288, __s1_288, (float16x4_t) {vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288)}); \
-  __ret_288; \
-})
-#else
-#define vfmlsl_laneq_low_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float32x2_t __s0_289 = __p0_289; \
-  float16x4_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float32x2_t __rev0_289;  __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 1, 0); \
-  float16x4_t __rev1_289;  __rev1_289 = __builtin_shufflevector(__s1_289, __s1_289, 3, 2, 1, 0); \
-  float16x8_t __rev2_289;  __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_289; \
-  __ret_289 = __noswap_vfmlsl_low_f16(__rev0_289, __rev1_289, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289)}); \
-  __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 1, 0); \
-  __ret_289; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
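-/* vmulh_lane_f16 / vmulh_laneq_f16: scalar f16 multiply by one lane of a
- * 64-bit or 128-bit f16 vector, respectively. */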
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_290, __p1_290, __p2_290) __extension__ ({ \
-  float16_t __s0_290 = __p0_290; \
-  float16x4_t __s1_290 = __p1_290; \
-  float16_t __ret_290; \
-  __ret_290 = __s0_290 * vget_lane_f16(__s1_290, __p2_290); \
-  __ret_290; \
-})
-#else
-#define vmulh_lane_f16(__p0_291, __p1_291, __p2_291) __extension__ ({ \
-  float16_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x4_t __rev1_291;  __rev1_291 = __builtin_shufflevector(__s1_291, __s1_291, 3, 2, 1, 0); \
-  float16_t __ret_291; \
-  __ret_291 = __s0_291 * __noswap_vget_lane_f16(__rev1_291, __p2_291); \
-  __ret_291; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_292, __p1_292, __p2_292) __extension__ ({ \
-  float16_t __s0_292 = __p0_292; \
-  float16x8_t __s1_292 = __p1_292; \
-  float16_t __ret_292; \
-  __ret_292 = __s0_292 * vgetq_lane_f16(__s1_292, __p2_292); \
-  __ret_292; \
-})
-#else
-#define vmulh_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __rev1_293;  __rev1_293 = __builtin_shufflevector(__s1_293, __s1_293, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_293; \
-  __ret_293 = __s0_293 * __noswap_vgetq_lane_f16(__rev1_293, __p2_293); \
-  __ret_293; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
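-/* __ARM_FEATURE_QRDMX (ARMv8.1): vqrdmlah and vqrdmlsh compose the rounding
- * doubling multiply-high (vqrdmulh) with a saturating add (vqadd) or
- * saturating subtract (vqsub) of the accumulator. */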
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
-  int32_t __s0_294 = __p0_294; \
-  int32_t __s1_294 = __p1_294; \
-  int32x2_t __s2_294 = __p2_294; \
-  int32_t __ret_294; \
-  __ret_294 = vqadds_s32(__s0_294, vqrdmulhs_s32(__s1_294, vget_lane_s32(__s2_294, __p3_294))); \
-  __ret_294; \
-})
-#else
-#define vqrdmlahs_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
-  int32_t __s0_295 = __p0_295; \
-  int32_t __s1_295 = __p1_295; \
-  int32x2_t __s2_295 = __p2_295; \
-  int32x2_t __rev2_295;  __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \
-  int32_t __ret_295; \
-  __ret_295 = vqadds_s32(__s0_295, vqrdmulhs_s32(__s1_295, __noswap_vget_lane_s32(__rev2_295, __p3_295))); \
-  __ret_295; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
-  int16_t __s0_296 = __p0_296; \
-  int16_t __s1_296 = __p1_296; \
-  int16x4_t __s2_296 = __p2_296; \
-  int16_t __ret_296; \
-  __ret_296 = vqaddh_s16(__s0_296, vqrdmulhh_s16(__s1_296, vget_lane_s16(__s2_296, __p3_296))); \
-  __ret_296; \
-})
-#else
-#define vqrdmlahh_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
-  int16_t __s0_297 = __p0_297; \
-  int16_t __s1_297 = __p1_297; \
-  int16x4_t __s2_297 = __p2_297; \
-  int16x4_t __rev2_297;  __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, 3, 2, 1, 0); \
-  int16_t __ret_297; \
-  __ret_297 = vqaddh_s16(__s0_297, vqrdmulhh_s16(__s1_297, __noswap_vget_lane_s16(__rev2_297, __p3_297))); \
-  __ret_297; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
-  int32_t __s0_298 = __p0_298; \
-  int32_t __s1_298 = __p1_298; \
-  int32x4_t __s2_298 = __p2_298; \
-  int32_t __ret_298; \
-  __ret_298 = vqadds_s32(__s0_298, vqrdmulhs_s32(__s1_298, vgetq_lane_s32(__s2_298, __p3_298))); \
-  __ret_298; \
-})
-#else
-#define vqrdmlahs_laneq_s32(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
-  int32_t __s0_299 = __p0_299; \
-  int32_t __s1_299 = __p1_299; \
-  int32x4_t __s2_299 = __p2_299; \
-  int32x4_t __rev2_299;  __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \
-  int32_t __ret_299; \
-  __ret_299 = vqadds_s32(__s0_299, vqrdmulhs_s32(__s1_299, __noswap_vgetq_lane_s32(__rev2_299, __p3_299))); \
-  __ret_299; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
-  int16_t __s0_300 = __p0_300; \
-  int16_t __s1_300 = __p1_300; \
-  int16x8_t __s2_300 = __p2_300; \
-  int16_t __ret_300; \
-  __ret_300 = vqaddh_s16(__s0_300, vqrdmulhh_s16(__s1_300, vgetq_lane_s16(__s2_300, __p3_300))); \
-  __ret_300; \
-})
-#else
-#define vqrdmlahh_laneq_s16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
-  int16_t __s0_301 = __p0_301; \
-  int16_t __s1_301 = __p1_301; \
-  int16x8_t __s2_301 = __p2_301; \
-  int16x8_t __rev2_301;  __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_301; \
-  __ret_301 = vqaddh_s16(__s0_301, vqrdmulhh_s16(__s1_301, __noswap_vgetq_lane_s16(__rev2_301, __p3_301))); \
-  __ret_301; \
-})
-#endif
-
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
-  int32_t __s0_302 = __p0_302; \
-  int32_t __s1_302 = __p1_302; \
-  int32x2_t __s2_302 = __p2_302; \
-  int32_t __ret_302; \
-  __ret_302 = vqsubs_s32(__s0_302, vqrdmulhs_s32(__s1_302, vget_lane_s32(__s2_302, __p3_302))); \
-  __ret_302; \
-})
-#else
-#define vqrdmlshs_lane_s32(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
-  int32_t __s0_303 = __p0_303; \
-  int32_t __s1_303 = __p1_303; \
-  int32x2_t __s2_303 = __p2_303; \
-  int32x2_t __rev2_303;  __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 1, 0); \
-  int32_t __ret_303; \
-  __ret_303 = vqsubs_s32(__s0_303, vqrdmulhs_s32(__s1_303, __noswap_vget_lane_s32(__rev2_303, __p3_303))); \
-  __ret_303; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
-  int16_t __s0_304 = __p0_304; \
-  int16_t __s1_304 = __p1_304; \
-  int16x4_t __s2_304 = __p2_304; \
-  int16_t __ret_304; \
-  __ret_304 = vqsubh_s16(__s0_304, vqrdmulhh_s16(__s1_304, vget_lane_s16(__s2_304, __p3_304))); \
-  __ret_304; \
-})
-#else
-#define vqrdmlshh_lane_s16(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int16_t __s0_305 = __p0_305; \
-  int16_t __s1_305 = __p1_305; \
-  int16x4_t __s2_305 = __p2_305; \
-  int16x4_t __rev2_305;  __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 3, 2, 1, 0); \
-  int16_t __ret_305; \
-  __ret_305 = vqsubh_s16(__s0_305, vqrdmulhh_s16(__s1_305, __noswap_vget_lane_s16(__rev2_305, __p3_305))); \
-  __ret_305; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32_t __s0_306 = __p0_306; \
-  int32_t __s1_306 = __p1_306; \
-  int32x4_t __s2_306 = __p2_306; \
-  int32_t __ret_306; \
-  __ret_306 = vqsubs_s32(__s0_306, vqrdmulhs_s32(__s1_306, vgetq_lane_s32(__s2_306, __p3_306))); \
-  __ret_306; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32_t __s0_307 = __p0_307; \
-  int32_t __s1_307 = __p1_307; \
-  int32x4_t __s2_307 = __p2_307; \
-  int32x4_t __rev2_307;  __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 3, 2, 1, 0); \
-  int32_t __ret_307; \
-  __ret_307 = vqsubs_s32(__s0_307, vqrdmulhs_s32(__s1_307, __noswap_vgetq_lane_s32(__rev2_307, __p3_307))); \
-  __ret_307; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int16_t __s0_308 = __p0_308; \
-  int16_t __s1_308 = __p1_308; \
-  int16x8_t __s2_308 = __p2_308; \
-  int16_t __ret_308; \
-  __ret_308 = vqsubh_s16(__s0_308, vqrdmulhh_s16(__s1_308, vgetq_lane_s16(__s2_308, __p3_308))); \
-  __ret_308; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int16_t __s0_309 = __p0_309; \
-  int16_t __s1_309 = __p1_309; \
-  int16x8_t __s2_309 = __p2_309; \
-  int16x8_t __rev2_309;  __rev2_309 = __builtin_shufflevector(__s2_309, __s2_309, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_309; \
-  __ret_309 = vqsubh_s16(__s0_309, vqrdmulhh_s16(__s1_309, __noswap_vgetq_lane_s16(__rev2_309, __p3_309))); \
-  __ret_309; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  poly64x2_t __s0_310 = __p0_310; \
-  poly64x1_t __s2_310 = __p2_310; \
-  poly64x2_t __ret_310; \
-  __ret_310 = vsetq_lane_p64(vget_lane_p64(__s2_310, __p3_310), __s0_310, __p1_310); \
-  __ret_310; \
-})
-#else
-#define vcopyq_lane_p64(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  poly64x2_t __s0_311 = __p0_311; \
-  poly64x1_t __s2_311 = __p2_311; \
-  poly64x2_t __rev0_311;  __rev0_311 = __builtin_shufflevector(__s0_311, __s0_311, 1, 0); \
-  poly64x2_t __ret_311; \
-  __ret_311 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_311, __p3_311), __rev0_311, __p1_311); \
-  __ret_311 = __builtin_shufflevector(__ret_311, __ret_311, 1, 0); \
-  __ret_311; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  float64x2_t __s0_312 = __p0_312; \
-  float64x1_t __s2_312 = __p2_312; \
-  float64x2_t __ret_312; \
-  __ret_312 = vsetq_lane_f64(vget_lane_f64(__s2_312, __p3_312), __s0_312, __p1_312); \
-  __ret_312; \
-})
-#else
-#define vcopyq_lane_f64(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  float64x2_t __s0_313 = __p0_313; \
-  float64x1_t __s2_313 = __p2_313; \
-  float64x2_t __rev0_313;  __rev0_313 = __builtin_shufflevector(__s0_313, __s0_313, 1, 0); \
-  float64x2_t __ret_313; \
-  __ret_313 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_313, __p3_313), __rev0_313, __p1_313); \
-  __ret_313 = __builtin_shufflevector(__ret_313, __ret_313, 1, 0); \
-  __ret_313; \
-})
-#endif
-
-#define vcopy_lane_p64(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  poly64x1_t __s0_314 = __p0_314; \
-  poly64x1_t __s2_314 = __p2_314; \
-  poly64x1_t __ret_314; \
-  __ret_314 = vset_lane_p64(vget_lane_p64(__s2_314, __p3_314), __s0_314, __p1_314); \
-  __ret_314; \
-})
-#define vcopy_lane_f64(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  float64x1_t __s0_315 = __p0_315; \
-  float64x1_t __s2_315 = __p2_315; \
-  float64x1_t __ret_315; \
-  __ret_315 = vset_lane_f64(vget_lane_f64(__s2_315, __p3_315), __s0_315, __p1_315); \
-  __ret_315; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  poly64x2_t __s0_316 = __p0_316; \
-  poly64x2_t __s2_316 = __p2_316; \
-  poly64x2_t __ret_316; \
-  __ret_316 = vsetq_lane_p64(vgetq_lane_p64(__s2_316, __p3_316), __s0_316, __p1_316); \
-  __ret_316; \
-})
-#else
-#define vcopyq_laneq_p64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  poly64x2_t __s0_317 = __p0_317; \
-  poly64x2_t __s2_317 = __p2_317; \
-  poly64x2_t __rev0_317;  __rev0_317 = __builtin_shufflevector(__s0_317, __s0_317, 1, 0); \
-  poly64x2_t __rev2_317;  __rev2_317 = __builtin_shufflevector(__s2_317, __s2_317, 1, 0); \
-  poly64x2_t __ret_317; \
-  __ret_317 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_317, __p3_317), __rev0_317, __p1_317); \
-  __ret_317 = __builtin_shufflevector(__ret_317, __ret_317, 1, 0); \
-  __ret_317; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  float64x2_t __s0_318 = __p0_318; \
-  float64x2_t __s2_318 = __p2_318; \
-  float64x2_t __ret_318; \
-  __ret_318 = vsetq_lane_f64(vgetq_lane_f64(__s2_318, __p3_318), __s0_318, __p1_318); \
-  __ret_318; \
-})
-#else
-#define vcopyq_laneq_f64(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  float64x2_t __s0_319 = __p0_319; \
-  float64x2_t __s2_319 = __p2_319; \
-  float64x2_t __rev0_319;  __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 1, 0); \
-  float64x2_t __rev2_319;  __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 1, 0); \
-  float64x2_t __ret_319; \
-  __ret_319 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_319, __p3_319), __rev0_319, __p1_319); \
-  __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 1, 0); \
-  __ret_319; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  poly64x1_t __s0_320 = __p0_320; \
-  poly64x2_t __s2_320 = __p2_320; \
-  poly64x1_t __ret_320; \
-  __ret_320 = vset_lane_p64(vgetq_lane_p64(__s2_320, __p3_320), __s0_320, __p1_320); \
-  __ret_320; \
-})
-#else
-#define vcopy_laneq_p64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  poly64x1_t __s0_321 = __p0_321; \
-  poly64x2_t __s2_321 = __p2_321; \
-  poly64x2_t __rev2_321;  __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 1, 0); \
-  poly64x1_t __ret_321; \
-  __ret_321 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_321, __p3_321), __s0_321, __p1_321); \
-  __ret_321; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  float64x1_t __s0_322 = __p0_322; \
-  float64x2_t __s2_322 = __p2_322; \
-  float64x1_t __ret_322; \
-  __ret_322 = vset_lane_f64(vgetq_lane_f64(__s2_322, __p3_322), __s0_322, __p1_322); \
-  __ret_322; \
-})
-#else
-#define vcopy_laneq_f64(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  float64x1_t __s0_323 = __p0_323; \
-  float64x2_t __s2_323 = __p2_323; \
-  float64x2_t __rev2_323;  __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 1, 0); \
-  float64x1_t __ret_323; \
-  __ret_323 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_323, __p3_323), __s0_323, __p1_323); \
-  __ret_323; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vmulx_lane_f64(__p0_324, __p1_324, __p2_324) __extension__ ({ \
-  float64x1_t __s0_324 = __p0_324; \
-  float64x1_t __s1_324 = __p1_324; \
-  float64x1_t __ret_324; \
-  float64_t __x_324 = vget_lane_f64(__s0_324, 0); \
-  float64_t __y_324 = vget_lane_f64(__s1_324, __p2_324); \
-  float64_t __z_324 = vmulxd_f64(__x_324, __y_324); \
-  __ret_324 = vset_lane_f64(__z_324, __s0_324, __p2_324); \
-  __ret_324; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_325, __p1_325, __p2_325) __extension__ ({ \
-  float64x1_t __s0_325 = __p0_325; \
-  float64x2_t __s1_325 = __p1_325; \
-  float64x1_t __ret_325; \
-  float64_t __x_325 = vget_lane_f64(__s0_325, 0); \
-  float64_t __y_325 = vgetq_lane_f64(__s1_325, __p2_325); \
-  float64_t __z_325 = vmulxd_f64(__x_325, __y_325); \
-  __ret_325 = vset_lane_f64(__z_325, __s0_325, 0); \
-  __ret_325; \
-})
-#else
-#define vmulx_laneq_f64(__p0_326, __p1_326, __p2_326) __extension__ ({ \
-  float64x1_t __s0_326 = __p0_326; \
-  float64x2_t __s1_326 = __p1_326; \
-  float64x2_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 1, 0); \
-  float64x1_t __ret_326; \
-  float64_t __x_326 = vget_lane_f64(__s0_326, 0); \
-  float64_t __y_326 = __noswap_vgetq_lane_f64(__rev1_326, __p2_326); \
-  float64_t __z_326 = vmulxd_f64(__x_326, __y_326); \
-  __ret_326 = vset_lane_f64(__z_326, __s0_326, 0); \
-  __ret_326; \
-})
-#endif
-
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-
-#undef __ai
-
-#endif /* __ARM_NEON_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx2intrin.h b/linux-x86/lib64/clang/11.0.1/include/avx2intrin.h
deleted file mode 100644
index 162e83e..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/avx2intrin.h
+++ /dev/null
@@ -1,1146 +0,0 @@
-/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX2INTRIN_H
-#define __AVX2INTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
-
-/* SSE4 Multiple Packed Sums of Absolute Difference.  */
-#define _mm256_mpsadbw_epu8(X, Y, M) \
-  (__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
-                                     (__v32qi)(__m256i)(Y), (int)(M))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi8(__m256i __a)
-{
-    return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi16(__m256i __a)
-{
-    return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi32(__m256i __a)
-{
-    return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packs_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packus_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packus_epi32(__m256i __V1, __m256i __V2)
-{
-  return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v32qu)__a + (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hu)__a + (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a + (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a + (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
-}
-
-#define _mm256_alignr_epi8(a, b, n) \
-  (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
-                                     (__v32qi)(__m256i)(b), (n))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)(~(__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_avg_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_avg_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
-{
-  return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
-                                              (__v32qi)__M);
-}
-
-#define _mm256_blend_epi16(V1, V2, M) \
-  (__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
-                                     (__v16hi)(__m256i)(V2), (int)(M))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v32qi)__a == (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hi)__a == (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8si)__a == (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4di)__a == (__v4di)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
-{
-  /* This function always performs a signed comparison, but __v32qi is a char
-     which may be signed or unsigned, so use __v32qs. */
-  return (__m256i)((__v32qs)__a > (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hi)__a > (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8si)__a > (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4di)__a > (__v4di)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi16(__m256i __a, __m256i __b)
-{
-    return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadds_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsubs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maddubs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_madd_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminuw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS256
-_mm256_movemask_epi8(__m256i __a)
-{
-  return __builtin_ia32_pmovmskb256((__v32qi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi16(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a
-     vector of plain char, whose signedness is implementation-defined, so
-     use the explicitly signed __v16qs instead. */
-  return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi32(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a
-     vector of plain char, whose signedness is implementation-defined, so
-     use the explicitly signed __v16qs instead. */
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi64(__m128i __V)
-{
-  /* This function always performs a signed extension, but __v16qi is a
-     vector of plain char, whose signedness is implementation-defined, so
-     use the explicitly signed __v16qs instead. */
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
-}
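-
-/* Editor's sketch, not upstream code: the three _epi8 conversions above
-   sign-extend, while the _epu8 variants further below zero-extend. As a
-   worked example, the byte 0x80 (-128 as signed char) widens to 0xFF80
-   under _mm256_cvtepi8_epi16 but to 0x0080 under _mm256_cvtepu8_epi16. */
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_demo_widen_signed_bytes(__m128i __bytes)
-{
-  /* 16 signed bytes -> 16 signed 16-bit words, negatives preserved. */
-  return _mm256_cvtepi8_epi16(__bytes);
-}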
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi32(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi16(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi32(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi32(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_epi64(__m128i __V)
-{
-  return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhi_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mullo_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hu)__a * (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mullo_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a * (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epu32(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a | (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sad_epu8(__m256i __a, __m256i __b)
-{
-  return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shuffle_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
-}
-
-#define _mm256_shuffle_epi32(a, imm) \
-  (__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))
-
-#define _mm256_shufflehi_epi16(a, imm) \
-  (__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))
-
-#define _mm256_shufflelo_epi16(a, imm) \
-  (__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
-}
-
-#define _mm256_slli_si256(a, imm) \
-  (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
-
-#define _mm256_bslli_epi128(a, imm) \
-  (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi16(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi16(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi32(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi32(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi64(__m256i __a, int __count)
-{
-  return __builtin_ia32_psllqi256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi64(__m256i __a, __m128i __count)
-{
-  return __builtin_ia32_psllq256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi16(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi16(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi32(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi32(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
-}
-
-#define _mm256_srli_si256(a, imm) \
-  (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
-
-#define _mm256_bsrli_epi128(a, imm) \
-  (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi16(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi16(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi32(__m256i __a, int __count)
-{
-  return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi32(__m256i __a, __m128i __count)
-{
-  return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi64(__m256i __a, int __count)
-{
-  return __builtin_ia32_psrlqi256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi64(__m256i __a, __m128i __count)
-{
-  return __builtin_ia32_psrlq256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v32qu)__a - (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v16hu)__a - (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v8su)__a - (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a - (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_si256(__m256i __a, __m256i __b)
-{
-  return (__m256i)((__v4du)__a ^ (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_stream_load_si256(__m256i const *__V)
-{
-  typedef __v4di __v4di_aligned __attribute__((aligned(32)));
-  return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_broadcastss_ps(__m128 __X)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_broadcastsd_pd(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcastss_ps(__m128 __X)
-{
-  return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_broadcastsd_pd(__m128d __X)
-{
-  return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastsi128_si256(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
-}
-
-#define _mm_blend_epi32(V1, V2, M) \
-  (__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
-                                     (__v4si)(__m128i)(V2), (int)(M))
-
-#define _mm256_blend_epi32(V1, V2, M) \
-  (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
-                                     (__v8si)(__m256i)(V2), (int)(M))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastb_epi8(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastw_epi16(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastd_epi32(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastq_epi64(__m128i __X)
-{
-  return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastb_epi8(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastw_epi16(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastd_epi32(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastq_epi64(__m128i __X)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
-{
-  return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
-}
-
-#define _mm256_permute4x64_pd(V, M) \
-  (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
-{
-  return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
-}
-
-#define _mm256_permute4x64_epi64(V, M) \
-  (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M))
-
-#define _mm256_permute2x128_si256(V1, V2, M) \
-  (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))
-
-#define _mm256_extracti128_si256(V, M) \
-  (__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))
-
-#define _mm256_inserti128_si256(V1, V2, M) \
-  (__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
-                                        (__v2di)(__m128i)(V2), (int)(M))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskload_epi32(int const *__X, __m256i __M)
-{
-  return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskload_epi64(long long const *__X, __m256i __M)
-{
-  return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskload_epi32(int const *__X, __m128i __M)
-{
-  return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskload_epi64(long long const *__X, __m128i __M)
-{
-  return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
-{
-  __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
-{
-  __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
-{
-  __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
-{
-  __builtin_ia32_maskstoreq((__v2di *)__X, (__v2di)__M, (__v2di)__Y);
-}
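-
-/* Editor's sketch, not upstream code: for these masked loads and stores only
-   the sign bit of each mask element is consulted (an assumption stated here
-   for clarity; the header itself does not document it). Lanes whose mask MSB
-   is set participate; masked-off lanes read as zero and are never written,
-   which makes the pair usable for loop tails: */
-static __inline__ void __DEFAULT_FN_ATTRS256
-_demo_masked_copy(int *__dst, int const *__src, __m256i __lane_mask)
-{
-  /* Copy exactly the lanes whose mask element has its top bit set. */
-  __m256i __v = _mm256_maskload_epi32(__src, __lane_mask);
-  _mm256_maskstore_epi32(__dst, __lane_mask, __v);
-}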
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi32(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi32(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi64(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi64(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srav_epi32(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srav_epi32(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi32(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi32(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi64(__m256i __X, __m256i __Y)
-{
-  return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi64(__m128i __X, __m128i __Y)
-{
-  return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
-}
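-
-/* Editor's sketch, not upstream code: unlike the shift-by-scalar forms
-   earlier, the sllv/srlv/srav families shift each lane by its own count.
-   Per the instruction set (an assumption, not stated in this header),
-   logical shifts by counts >= the lane width yield zero, while srav
-   saturates the count and fills with the sign bit. _mm256_setr_epi32 is
-   assumed from avxintrin.h. */
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_demo_per_lane_shift(__m256i __x)
-{
-  /* Shift 32-bit lane i left by i bits, i = 0..7. */
-  return _mm256_sllv_epi32(__x, _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7));
-}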
-
-#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
-  (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
-                                     (double const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v2df)(__m128d)(mask), (s))
-
-#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
-  (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
-                                        (double const *)(m), \
-                                        (__v4si)(__m128i)(i), \
-                                        (__v4df)(__m256d)(mask), (s))
-
-#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
-  (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
-                                     (double const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v2df)(__m128d)(mask), (s))
-
-#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
-  (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
-                                        (double const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4df)(__m256d)(mask), (s))
-
-#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
-  (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
-                                    (float const *)(m), \
-                                    (__v4si)(__m128i)(i), \
-                                    (__v4sf)(__m128)(mask), (s))
-
-#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
-  (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
-                                       (float const *)(m), \
-                                       (__v8si)(__m256i)(i), \
-                                       (__v8sf)(__m256)(mask), (s))
-
-#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
-  (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
-                                    (float const *)(m), \
-                                    (__v2di)(__m128i)(i), \
-                                    (__v4sf)(__m128)(mask), (s))
-
-#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
-  (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
-                                       (float const *)(m), \
-                                       (__v4di)(__m256i)(i), \
-                                       (__v4sf)(__m128)(mask), (s))
-
-#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
-  (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
-                                    (int const *)(m), \
-                                    (__v4si)(__m128i)(i), \
-                                    (__v4si)(__m128i)(mask), (s))
-
-#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
-  (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
-                                       (int const *)(m), \
-                                       (__v8si)(__m256i)(i), \
-                                       (__v8si)(__m256i)(mask), (s))
-
-#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
-  (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
-                                    (int const *)(m), \
-                                    (__v2di)(__m128i)(i), \
-                                    (__v4si)(__m128i)(mask), (s))
-
-#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
-  (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
-                                       (int const *)(m), \
-                                       (__v4di)(__m256i)(i), \
-                                       (__v4si)(__m128i)(mask), (s))
-
-#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
-  (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
-                                    (long long const *)(m), \
-                                    (__v4si)(__m128i)(i), \
-                                    (__v2di)(__m128i)(mask), (s))
-
-#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
-  (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
-                                       (long long const *)(m), \
-                                       (__v4si)(__m128i)(i), \
-                                       (__v4di)(__m256i)(mask), (s))
-
-#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
-  (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
-                                    (long long const *)(m), \
-                                    (__v2di)(__m128i)(i), \
-                                    (__v2di)(__m128i)(mask), (s))
-
-#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
-  (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
-                                       (long long const *)(m), \
-                                       (__v4di)(__m256i)(i), \
-                                       (__v4di)(__m256i)(mask), (s))
-
-#define _mm_i32gather_pd(m, i, s) \
-  (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
-                                     (double const *)(m), \
-                                     (__v4si)(__m128i)(i), \
-                                     (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
-                                                          _mm_setzero_pd()), \
-                                     (s))
-
-#define _mm256_i32gather_pd(m, i, s) \
-  (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
-                                        (double const *)(m), \
-                                        (__v4si)(__m128i)(i), \
-                                        (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
-                                                              _mm256_setzero_pd(), \
-                                                              _CMP_EQ_OQ), \
-                                        (s))
-
-#define _mm_i64gather_pd(m, i, s) \
-  (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
-                                     (double const *)(m), \
-                                     (__v2di)(__m128i)(i), \
-                                     (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
-                                                          _mm_setzero_pd()), \
-                                     (s))
-
-#define _mm256_i64gather_pd(m, i, s) \
-  (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
-                                        (double const *)(m), \
-                                        (__v4di)(__m256i)(i), \
-                                        (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
-                                                              _mm256_setzero_pd(), \
-                                                              _CMP_EQ_OQ), \
-                                        (s))
-
-#define _mm_i32gather_ps(m, i, s) \
-  (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
-                                    (float const *)(m), \
-                                    (__v4si)(__m128i)(i), \
-                                    (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
-                                                         _mm_setzero_ps()), \
-                                    (s))
-
-#define _mm256_i32gather_ps(m, i, s) \
-  (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
-                                       (float const *)(m), \
-                                       (__v8si)(__m256i)(i), \
-                                       (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
-                                                             _mm256_setzero_ps(), \
-                                                             _CMP_EQ_OQ), \
-                                       (s))
-
-#define _mm_i64gather_ps(m, i, s) \
-  (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
-                                    (float const *)(m), \
-                                    (__v2di)(__m128i)(i), \
-                                    (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
-                                                         _mm_setzero_ps()), \
-                                    (s))
-
-#define _mm256_i64gather_ps(m, i, s) \
-  (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
-                                       (float const *)(m), \
-                                       (__v4di)(__m256i)(i), \
-                                       (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
-                                                            _mm_setzero_ps()), \
-                                       (s))
-
-#define _mm_i32gather_epi32(m, i, s) \
-  (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
-                                    (int const *)(m), (__v4si)(__m128i)(i), \
-                                    (__v4si)_mm_set1_epi32(-1), (s))
-
-#define _mm256_i32gather_epi32(m, i, s) \
-  (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
-                                       (int const *)(m), (__v8si)(__m256i)(i), \
-                                       (__v8si)_mm256_set1_epi32(-1), (s))
-
-#define _mm_i64gather_epi32(m, i, s) \
-  (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
-                                    (int const *)(m), (__v2di)(__m128i)(i), \
-                                    (__v4si)_mm_set1_epi32(-1), (s))
-
-#define _mm256_i64gather_epi32(m, i, s) \
-  (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
-                                       (int const *)(m), (__v4di)(__m256i)(i), \
-                                       (__v4si)_mm_set1_epi32(-1), (s))
-
-#define _mm_i32gather_epi64(m, i, s) \
-  (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
-                                    (long long const *)(m), \
-                                    (__v4si)(__m128i)(i), \
-                                    (__v2di)_mm_set1_epi64x(-1), (s))
-
-#define _mm256_i32gather_epi64(m, i, s) \
-  (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
-                                       (long long const *)(m), \
-                                       (__v4si)(__m128i)(i), \
-                                       (__v4di)_mm256_set1_epi64x(-1), (s))
-
-#define _mm_i64gather_epi64(m, i, s) \
-  (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
-                                    (long long const *)(m), \
-                                    (__v2di)(__m128i)(i), \
-                                    (__v2di)_mm_set1_epi64x(-1), (s))
-
-#define _mm256_i64gather_epi64(m, i, s) \
-  (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
-                                       (long long const *)(m), \
-                                       (__v4di)(__m256i)(i), \
-                                       (__v4di)_mm256_set1_epi64x(-1), (s))
-
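-/* Editor's sketch, not upstream code: each gather above reads lane k from
-   (char const *)m + i[k] * s, where the scale s must be a constant 1, 2, 4,
-   or 8, and the masked forms keep lane k of `a` when that lane's mask MSB is
-   clear (semantics assumed from the instruction set, not this header). */
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_demo_gather_ints(int const *__base, __m256i __indices)
-{
-  /* Fetch eight ints at __base[__indices[k]]; scale 4 = sizeof(int). */
-  return _mm256_i32gather_epi32(__base, __indices, 4);
-}
-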
-#undef __DEFAULT_FN_ATTRS256
-#undef __DEFAULT_FN_ATTRS128
-
-#endif /* __AVX2INTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512bwintrin.h b/linux-x86/lib64/clang/11.0.1/include/avx512bwintrin.h
deleted file mode 100644
index 3765584..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/avx512bwintrin.h
+++ /dev/null
@@ -1,2021 +0,0 @@
-/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512BWINTRIN_H
-#define __AVX512BWINTRIN_H
-
-typedef unsigned int __mmask32;
-typedef unsigned long long __mmask64;
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
-
-static __inline __mmask32 __DEFAULT_FN_ATTRS
-_knot_mask32(__mmask32 __M)
-{
-  return __builtin_ia32_knotsi(__M);
-}
-
-static __inline __mmask64 __DEFAULT_FN_ATTRS
-_knot_mask64(__mmask64 __M)
-{
-  return __builtin_ia32_knotdi(__M);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kand_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kand_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kandn_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kandn_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxnor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxnor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxor_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxor_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask32_u8(__mmask32 __A, __mmask32 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
-  return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
-  return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
-  return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kadd_mask32(__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kadd_mask64(__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B);
-}
-
-#define _kshiftli_mask32(A, I) \
-  (__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I))
-
-#define _kshiftri_mask32(A, I) \
-  (__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I))
-
-#define _kshiftli_mask64(A, I) \
-  (__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I))
-
-#define _kshiftri_mask64(A, I) \
-  (__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask32_u32(__mmask32 __A) {
-  return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_cvtmask64_u64(__mmask64 __A) {
-  return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_cvtu32_mask32(unsigned int __A) {
-  return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_cvtu64_mask64(unsigned long long __A) {
-  return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_load_mask32(__mmask32 *__A) {
-  return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_load_mask64(__mmask64 *__A) {
-  return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask32(__mmask32 *__A, __mmask32 __B) {
-  *(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask64(__mmask64 *__A, __mmask64 __B) {
-  *(__mmask64 *)__A = __builtin_ia32_kmovq((__mmask64)__B);
-}
-
-/* Integer compare */
-
-#define _mm512_cmp_epi8_mask(a, b, p) \
-  (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                         (__v64qi)(__m512i)(b), (int)(p), \
-                                         (__mmask64)-1)
-
-#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \
-  (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
-                                         (__v64qi)(__m512i)(b), (int)(p), \
-                                         (__mmask64)(m))
-
-#define _mm512_cmp_epu8_mask(a, b, p) \
-  (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)-1)
-
-#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \
-  (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
-                                          (__v64qi)(__m512i)(b), (int)(p), \
-                                          (__mmask64)(m))
-
-#define _mm512_cmp_epi16_mask(a, b, p) \
-  (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                         (__v32hi)(__m512i)(b), (int)(p), \
-                                         (__mmask32)-1)
-
-#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \
-  (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
-                                         (__v32hi)(__m512i)(b), (int)(p), \
-                                         (__mmask32)(m))
-
-#define _mm512_cmp_epu16_mask(a, b, p) \
-  (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)-1)
-
-#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \
-  (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
-                                          (__v32hi)(__m512i)(b), (int)(p), \
-                                          (__mmask32)(m))
-
-#define _mm512_cmpeq_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi8_mask(A, B) \
-    _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi8_mask(k, A, B) \
-    _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu8_mask(A, B) \
-    _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu8_mask(k, A, B) \
-    _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi16_mask(A, B) \
-    _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi16_mask(k, A, B) \
-    _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu16_mask(A, B) \
-    _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu16_mask(k, A, B) \
-    _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
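-
-/* Editor's sketch, not upstream code: these compares produce a k-mask (one
-   bit per lane) rather than a vector of lane-wide booleans, so the results
-   compose directly with the mask operations defined earlier in this file: */
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_demo_bytes_in_range(__m512i __x, __m512i __lo, __m512i __hi)
-{
-  /* Bit k is set when __lo[k] <= __x[k] <= __hi[k], signed. */
-  return _kand_mask64(_mm512_cmpge_epi8_mask(__x, __lo),
-                      _mm512_cmple_epi8_mask(__x, __hi));
-}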
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qu) __A + (__v64qu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_add_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_add_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
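-
-/* Editor's note, not upstream code: the pattern above repeats throughout
-   this file. The plain intrinsic computes every lane; the _mask_ form then
-   merges the result into __W for lanes whose bit in __U is set (merge
-   masking), and the _maskz_ form zeroes unselected lanes instead (zero
-   masking). Illustrative call: */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_demo_add_even_byte_lanes(__m512i __w, __m512i __a, __m512i __b)
-{
-  /* Even lanes get __a + __b; odd lanes keep their value from __w. */
-  return _mm512_mask_add_epi8(__w, 0x5555555555555555ULL, __a, __b);
-}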
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi8 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v64qu) __A - (__v64qu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_sub_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_sub_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A + (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_add_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_add_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A - (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_sub_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_sub_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v32hu) __A * (__v32hu) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_mullo_epi16(__A, __B),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_mullo_epi16(__A, __B),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-              (__v64qi) __W,
-              (__v64qi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-              (__v32hi) __W,
-              (__v32hi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi8 (__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_abs_epi8(__A),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                             (__v64qi)_mm512_abs_epi8(__A),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi16 (__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_abs_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_abs_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packs_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packs_epi32(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packs_epi32(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packs_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packs_epi16(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packs_epi16(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packus_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packus_epi32(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                       (__v32hi)_mm512_packus_epi32(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_packus_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packus_epi16(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                        (__v64qi)_mm512_packus_epi16(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epi16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epi16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epu8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_adds_epu8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_adds_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epu16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_adds_epu16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_avg_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pavgb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
-          __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-              (__v64qi)_mm512_avg_epu8(__A, __B),
-              (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-              (__v64qi)_mm512_avg_epu8(__A, __B),
-              (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_avg_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pavgw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
-           __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-              (__v32hi)_mm512_avg_epu16(__A, __B),
-              (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-              (__v32hi)_mm512_avg_epu16(__A, __B),
-              (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epi16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
-           __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epi16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epu8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_max_epu8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epu16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_max_epu16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epi8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epi8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epi16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epi16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epu8(__A, __B),
-                                             (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
-                                             (__v64qi)_mm512_min_epu8(__A, __B),
-                                             (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epu16(__A, __B),
-                                            (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                            (__v32hi)_mm512_min_epu16(__A, __B),
-                                            (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_shuffle_epi8(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                         (__v64qi)_mm512_shuffle_epi8(__A, __B),
-                                         (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                         (__v64qi)_mm512_shuffle_epi8(__A, __B),
-                                         (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epi16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epi16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu8 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epu8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_subs_epu8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_subs_epu16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epu16(__A, __B),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                        (__v32hi)_mm512_subs_epu16(__A, __B),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
-                                                 (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__U,
-                              (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
-                              (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_mulhrs_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_mulhrs_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
-       __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhi_epu16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epu16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_mulhi_epu16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maddubs_epi16(__m512i __X, __m512i __Y) {
-  return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X,
-                          __m512i __Y) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
-                                        (__v32hi)_mm512_maddubs_epi16(__X, __Y),
-                                        (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
-                                        (__v32hi)_mm512_maddubs_epi16(__X, __Y),
-                                        (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_madd_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_madd_epi16(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_madd_epi16(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi)_mm256_setzero_si256(),
-               (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi)__O,
-               __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
-               (__v32qi) _mm256_setzero_si256(),
-               __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) _mm256_setzero_si256(),
-                (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
-                (__v32qi) _mm256_setzero_si256(),
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi8 (__m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) _mm256_undefined_si256(),
-              (__mmask32) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) __O,
-              __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
-  return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
-              (__v32qi) _mm256_setzero_si256(),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
-{
-  __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
-                                          8,  64+8,   9, 64+9,
-                                          10, 64+10, 11, 64+11,
-                                          12, 64+12, 13, 64+13,
-                                          14, 64+14, 15, 64+15,
-                                          24, 64+24, 25, 64+25,
-                                          26, 64+26, 27, 64+27,
-                                          28, 64+28, 29, 64+29,
-                                          30, 64+30, 31, 64+31,
-                                          40, 64+40, 41, 64+41,
-                                          42, 64+42, 43, 64+43,
-                                          44, 64+44, 45, 64+45,
-                                          46, 64+46, 47, 64+47,
-                                          56, 64+56, 57, 64+57,
-                                          58, 64+58, 59, 64+59,
-                                          60, 64+60, 61, 64+61,
-                                          62, 64+62, 63, 64+63);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpackhi_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
-                                          4,  32+4,   5, 32+5,
-                                          6,  32+6,   7, 32+7,
-                                          12, 32+12, 13, 32+13,
-                                          14, 32+14, 15, 32+15,
-                                          20, 32+20, 21, 32+21,
-                                          22, 32+22, 23, 32+23,
-                                          28, 32+28, 29, 32+29,
-                                          30, 32+30, 31, 32+31);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpackhi_epi16(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
-                                          0,  64+0,   1, 64+1,
-                                          2,  64+2,   3, 64+3,
-                                          4,  64+4,   5, 64+5,
-                                          6,  64+6,   7, 64+7,
-                                          16, 64+16, 17, 64+17,
-                                          18, 64+18, 19, 64+19,
-                                          20, 64+20, 21, 64+21,
-                                          22, 64+22, 23, 64+23,
-                                          32, 64+32, 33, 64+33,
-                                          34, 64+34, 35, 64+35,
-                                          36, 64+36, 37, 64+37,
-                                          38, 64+38, 39, 64+39,
-                                          48, 64+48, 49, 64+49,
-                                          50, 64+50, 51, 64+51,
-                                          52, 64+52, 53, 64+53,
-                                          54, 64+54, 55, 64+55);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
-                                        (__v64qi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
-                                        (__v64qi)_mm512_unpacklo_epi8(__A, __B),
-                                        (__v64qi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
-  return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
-                                          0,  32+0,   1, 32+1,
-                                          2,  32+2,   3, 32+3,
-                                          8,  32+8,   9, 32+9,
-                                          10, 32+10, 11, 32+11,
-                                          16, 32+16, 17, 32+17,
-                                          18, 32+18, 19, 32+19,
-                                          24, 32+24, 25, 32+25,
-                                          26, 32+26, 27, 32+27);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
-                                       (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                       (__v32hi)_mm512_unpacklo_epi16(__A, __B),
-                                       (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi16(__m256i __A)
-{
-  /* This function always performs a signed extension, but __v32qi is a char
-     which may be signed or unsigned, so use __v32qs. */
-  return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepi8_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepi8_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi16(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepu8_epi16(__A),
-                                             (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                             (__v32hi)_mm512_cvtepu8_epi16(__A),
-                                             (__v32hi)_mm512_setzero_si512());
-}
-
-
-#define _mm512_shufflehi_epi16(A, imm) \
-  (__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm))
-
-#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)(__m512i)(W))
-
-#define _mm512_maskz_shufflehi_epi16(U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflehi_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)_mm512_setzero_si512())
-
-#define _mm512_shufflelo_epi16(A, imm) \
-  (__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm))
-
-
-#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)(__m512i)(W))
-
-
-#define _mm512_maskz_shufflelo_epi16(U, A, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                      (__v32hi)_mm512_shufflelo_epi16((A), \
-                                                                      (imm)), \
-                                      (__v32hi)_mm512_setzero_si512())
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_sllv_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_sllv_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sll_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sll_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_slli_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_slli_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_bslli_epi128(a, imm) \
-  (__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srlv_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srlv_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi16(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srav_epi16(__A, __B),
-                                           (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                           (__v32hi)_mm512_srav_epi16(__A, __B),
-                                           (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sra_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_sra_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srai_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srai_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi16(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_srl_epi16(__A, __B),
-                                          (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                          (__v32hi)_mm512_srl_epi16(__A, __B),
-                                          (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
-                                         (__v32hi)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
-                                         (__v32hi)_mm512_srli_epi16(__A, __B),
-                                         (__v32hi)_mm512_setzero_si512());
-}
-
-#define _mm512_bsrli_epi128(a, imm) \
-  (__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-                (__v32hi) __A,
-                (__v32hi) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
-                (__v32hi) __A,
-                (__v32hi) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-                (__v64qi) __A,
-                (__v64qi) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
-                (__v64qi) __A,
-                (__v64qi) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512(__M,
-                                              (__v64qi)_mm512_set1_epi8(__A),
-                                              (__v64qi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
-{
-  return (__m512i) __builtin_ia32_selectb_512(__M,
-                                              (__v64qi) _mm512_set1_epi8(__A),
-                                              (__v64qi) _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
-{
-  return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
-                (__mmask64) __B);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
-{
-  return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
-                (__mmask32) __B);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi16 (void const *__P)
-{
-  struct __loadu_epi16 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi16*)__P)->__v;
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P,
-                 (__v32hi) __W,
-                 (__mmask32) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P,
-                 (__v32hi)
-                 _mm512_setzero_si512 (),
-                 (__mmask32) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi8 (void const *__P)
-{
-  struct __loadu_epi8 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi8*)__P)->__v;
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P,
-                 (__v64qi) __W,
-                 (__mmask64) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P,
-                 (__v64qi)
-                 _mm512_setzero_si512 (),
-                 (__mmask64) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi16 (void *__P, __m512i __A)
-{
-  struct __storeu_epi16 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi16*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
-{
-  __builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
-             (__v32hi) __A,
-             (__mmask32) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi8 (void *__P, __m512i __A)
-{
-  struct __storeu_epi8 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi8*)__P)->__v = __A;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
-{
-  __builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
-             (__v64qi) __A,
-             (__mmask64) __U);
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_test_epi8_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_test_epi16_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
-                                      _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
-_mm512_movepi8_mask (__m512i __A)
-{
-  return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
-}
-
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
-_mm512_movepi16_mask (__m512i __A)
-{
-  return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_movm_epi8 (__mmask64 __A)
-{
-  return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_movm_epi16 (__mmask32 __A)
-{
-  return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastb_epi8 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512(__M,
-                                             (__v64qi) _mm512_broadcastb_epi8(__A),
-                                             (__v64qi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectb_512(__M,
-                                             (__v64qi) _mm512_broadcastb_epi8(__A),
-                                             (__v64qi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512(__M,
-                                              (__v32hi) _mm512_set1_epi16(__A),
-                                              (__v32hi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
-{
-  return (__m512i) __builtin_ia32_selectw_512(__M,
-                                              (__v32hi) _mm512_set1_epi16(__A),
-                                              (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastw_epi16 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__M,
-                                             (__v32hi) _mm512_broadcastw_epi16(__A),
-                                             (__v32hi) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectw_512(__M,
-                                             (__v32hi) _mm512_broadcastw_epi16(__A),
-                                             (__v32hi) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
-        __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                    (__v32hi)_mm512_permutexvar_epi16(__A, __B),
-                                    (__v32hi)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
-             __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
-                                    (__v32hi)_mm512_permutexvar_epi16(__A, __B),
-                                    (__v32hi)__W);
-}
-
-#define _mm512_alignr_epi8(A, B, N) \
-  (__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
-                                     (__v64qi)(__m512i)(B), (int)(N))
-
-#define _mm512_mask_alignr_epi8(W, U, A, B, N) \
-  (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                             (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                             (__v64qi)(__m512i)(W))
-
-#define _mm512_maskz_alignr_epi8(U, A, B, N) \
-  (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
-                              (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
-                              (__v64qi)(__m512i)_mm512_setzero_si512())
-
-#define _mm512_dbsad_epu8(A, B, imm) \
-  (__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
-                                      (__v64qi)(__m512i)(B), (int)(imm))
-
-#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                  (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)(__m512i)(W))
-
-#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
-                                  (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
-                                  (__v32hi)_mm512_setzero_si512())
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sad_epu8 (__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
-               (__v64qi) __B);
-}
-
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS
-
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512fintrin.h b/linux-x86/lib64/clang/11.0.1/include/avx512fintrin.h
deleted file mode 100644
index 7465da3..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/avx512fintrin.h
+++ /dev/null
@@ -1,9683 +0,0 @@
-/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512FINTRIN_H
-#define __AVX512FINTRIN_H
-
-typedef char __v64qi __attribute__((__vector_size__(64)));
-typedef short __v32hi __attribute__((__vector_size__(64)));
-typedef double __v8df __attribute__((__vector_size__(64)));
-typedef float __v16sf __attribute__((__vector_size__(64)));
-typedef long long __v8di __attribute__((__vector_size__(64)));
-typedef int __v16si __attribute__((__vector_size__(64)));
-
-/* Unsigned types */
-typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
-typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
-typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
-typedef unsigned int __v16su __attribute__((__vector_size__(64)));
-
-typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
-typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
-typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
-
-typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));
-
-typedef unsigned char __mmask8;
-typedef unsigned short __mmask16;
-
-/* Rounding mode macros.  */
-#define _MM_FROUND_TO_NEAREST_INT   0x00
-#define _MM_FROUND_TO_NEG_INF       0x01
-#define _MM_FROUND_TO_POS_INF       0x02
-#define _MM_FROUND_TO_ZERO          0x03
-#define _MM_FROUND_CUR_DIRECTION    0x04
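-
-/* Illustrative note (not part of the original header): these values select
-   the rounding behavior of the "_round" intrinsics defined below.  A minimal
-   sketch, given hypothetical __m512 values a and b:
-
-     // Use whatever rounding mode MXCSR currently specifies.
-     __m512 m = _mm512_max_round_ps(a, b, _MM_FROUND_CUR_DIRECTION);
-*/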
-
-/* Constants for integer comparison predicates */
-typedef enum {
-    _MM_CMPINT_EQ,      /* Equal */
-    _MM_CMPINT_LT,      /* Less than */
-    _MM_CMPINT_LE,      /* Less than or Equal */
-    _MM_CMPINT_UNUSED,  /* Not used; keeps _MM_CMPINT_NE == 4 */
-    _MM_CMPINT_NE,      /* Not Equal */
-    _MM_CMPINT_NLT,     /* Not Less than */
-#define _MM_CMPINT_GE   _MM_CMPINT_NLT  /* Greater than or Equal */
-    _MM_CMPINT_NLE      /* Not Less than or Equal */
-#define _MM_CMPINT_GT   _MM_CMPINT_NLE  /* Greater than */
-} _MM_CMPINT_ENUM;
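-
-/* Illustrative note (not part of the original header): a predicate from this
-   enum is passed to the integer compare intrinsics, which return a bitmask
-   rather than a vector.  A minimal sketch:
-
-     __m512i x = _mm512_set1_epi32(1);
-     __m512i y = _mm512_set1_epi32(2);
-     __mmask16 lt = _mm512_cmp_epi32_mask(x, y, _MM_CMPINT_LT);  // 0xFFFF
-*/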
-
-typedef enum
-{
-  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
-  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
-  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
-  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
-  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
-  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
-  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
-  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
-  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
-  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
-  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
-  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
-  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
-  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
-  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
-  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
-  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
-  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
-  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
-  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
-  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
-  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
-  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
-  _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
-  _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
-  _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
-  _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
-  _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
-  _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
-  _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
-  _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
-  _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
-  _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
-  _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
-  _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
-  _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
-  _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
-  _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
-  _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
-  _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
-  _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
-  _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
-  _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
-  _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
-  _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
-  _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
-  _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
-  _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
-  _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
-  _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
-  _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
-  _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
-  _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
-  _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
-  _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
-  _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
-  _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
-  _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
-  _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
-  _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
-  _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
-  _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
-  _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
-  _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
-  _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
-  _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
-  _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
-  _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
-  _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
-  _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
-  _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
-  _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
-  _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
-  _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
-  _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
-  _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
-  _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
-  _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
-  _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
-  _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
-  _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
-  _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
-  _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
-  _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
-  _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
-  _MM_PERM_DDDD = 0xFF
-} _MM_PERM_ENUM;
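-
-/* Illustrative note (not part of the original header): each _MM_PERM value
-   encodes, two bits per slot, which source dword (A = element 0 ... D =
-   element 3) lands in each position of a 128-bit lane.  A minimal sketch,
-   given a hypothetical __m512i value v:
-
-     // Broadcast element 0 of every 128-bit lane to all four slots.
-     __m512i r = _mm512_shuffle_epi32(v, _MM_PERM_AAAA);
-*/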
-
-typedef enum
-{
-  _MM_MANT_NORM_1_2,    /* interval [1, 2)      */
-  _MM_MANT_NORM_p5_2,   /* interval [0.5, 2)    */
-  _MM_MANT_NORM_p5_1,   /* interval [0.5, 1)    */
-  _MM_MANT_NORM_p75_1p5   /* interval [0.75, 1.5) */
-} _MM_MANTISSA_NORM_ENUM;
-
-typedef enum
-{
-  _MM_MANT_SIGN_src,    /* sign = sign(SRC)     */
-  _MM_MANT_SIGN_zero,   /* sign = 0             */
-  _MM_MANT_SIGN_nan   /* DEST = NaN if sign(SRC) = 1 */
-} _MM_MANTISSA_SIGN_ENUM;
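-
-/* Illustrative note (not part of the original header): the two mantissa
-   enums parameterize the getmant intrinsics, which extract a normalized
-   mantissa.  A minimal sketch, given a hypothetical __m512d value x:
-
-     // Mantissa normalized to [1, 2) with the sign forced to zero.
-     __m512d m = _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_zero);
-*/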
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
-
-/* Create vectors with repeated elements */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_setzero_si512(void)
-{
-  return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-#define _mm512_setzero_epi32 _mm512_setzero_si512
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_undefined_pd(void)
-{
-  return (__m512d)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined_ps(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_undefined_epi32(void)
-{
-  return (__m512i)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastd_epi32 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastq_epi64 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) _mm512_setzero_si512());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_setzero_ps(void)
-{
-  return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                                 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-#define _mm512_setzero _mm512_setzero_ps
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_setzero_pd(void)
-{
-  return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set1_ps(float __w)
-{
-  return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
-                                 __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set1_pd(double __w)
-{
-  return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi8(char __w)
-{
-  return __extension__ (__m512i)(__v64qi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi16(short __w)
-{
-  return __extension__ (__m512i)(__v32hi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi32(int __s)
-{
-  return __extension__ (__m512i)(__v16si){
-    __s, __s, __s, __s, __s, __s, __s, __s,
-    __s, __s, __s, __s, __s, __s, __s, __s };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si)_mm512_set1_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi64(long long __d)
-{
-  return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di)_mm512_set1_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
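-
-/* Illustrative note (not part of the original header): the maskz_set1 forms
-   fuse a broadcast with zero-masking.  A minimal sketch:
-
-     // Lanes 0-3 become 7; lanes 4-7 become 0.
-     __m512i v = _mm512_maskz_set1_epi64((__mmask8)0x0F, 7);
-*/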
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcastss_ps(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
-                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
-{
-  return __extension__ (__m512i)(__v16si)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi64 (long long __A, long long __B, long long __C,
-       long long __D)
-{
-  return __extension__ (__m512i) (__v8di)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set4_pd (double __A, double __B, double __C, double __D)
-{
-  return __extension__ (__m512d)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set4_ps (float __A, float __B, float __C, float __D)
-{
-  return __extension__ (__m512)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-#define _mm512_setr4_epi32(e0,e1,e2,e3)               \
-  _mm512_set4_epi32((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_epi64(e0,e1,e2,e3)               \
-  _mm512_set4_epi64((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_pd(e0,e1,e2,e3)                \
-  _mm512_set4_pd((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_ps(e0,e1,e2,e3)                \
-  _mm512_set4_ps((e3),(e2),(e1),(e0))
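-
-/* Illustrative note (not part of the original header): set4 stores its
-   arguments highest-element-first and repeats them across the register,
-   while setr4 takes them in ascending (memory) order.  A minimal sketch:
-
-     // Both produce the repeating dword pattern {0, 1, 2, 3, ...}.
-     __m512i a = _mm512_set4_epi32(3, 2, 1, 0);
-     __m512i b = _mm512_setr4_epi32(0, 1, 2, 3);
-*/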
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcastsd_pd(__m128d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-/* Cast between vector types */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0,  1,  2,  3,  4,  5,  6,  7,
-                                          -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m128d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd128(__m512d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1);
-}
-
-static __inline __m256d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd256 (__m512d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline __m128 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps128(__m512 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
-}
-
-static __inline __m256 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps256 (__m512 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castpd_ps (__m512d __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castpd_si512 (__m512d __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd128_pd512 (__m128d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castps_pd (__m512 __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castps_si512 (__m512 __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps128_ps512 (__m128 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, -1, -1, -1, -1,
-                                 -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi128_si512 (__m128i __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi256_si512 (__m256i __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castsi512_ps (__m512i __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castsi512_pd (__m512i __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si128 (__m512i __A)
-{
-  return (__m128i)__builtin_shufflevector(__A, __A, 0, 1);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si256 (__m512i __A)
-{
-  return (__m256i)__builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_int2mask(int __a)
-{
-  return (__mmask16)__a;
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_mask2int(__mmask16 __a)
-{
-  return (int)__a;
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    128-bit floating-point vector of [2 x double]. The lower 128 bits
-///    contain the value of the source vector. The upper 384 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd128_pd512(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    256-bit floating-point vector of [4 x double]. The lower 256 bits
-///    contain the value of the source vector. The upper 256 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    128-bit floating-point vector of [4 x float]. The lower 128 bits contain
-///    the value of the source vector. The upper 384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps128_ps512(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    256-bit floating-point vector of [8 x float]. The lower 256 bits contain
-///    the value of the source vector. The upper 256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-/// Constructs a 512-bit integer vector from a 128-bit integer vector.
-///    The lower 128 bits contain the value of the source vector. The upper
-///    384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 128 bits contain the value of
-///    the parameter. The upper 384 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi128_si512(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit integer vector from a 256-bit integer vector.
-///    The lower 256 bits contain the value of the source vector. The upper
-///    256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 256 bits contain the value of
-///    the parameter. The upper 256 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi256_si512(__m256i __a)
-{
-  return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
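-
-/* Illustrative note (not part of the original header): the zext casts differ
-   from the plain casts above in what the upper bits hold.  A minimal sketch,
-   given a hypothetical __m256i value lo:
-
-     __m512i a = _mm512_castsi256_si512(lo);  // upper 256 bits undefined
-     __m512i b = _mm512_zextsi256_si512(lo);  // upper 256 bits zeroed
-*/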
-
-/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a & (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                (__v16si) _mm512_and_epi32(__a, __b),
-                (__v16si) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-    return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
-                (__v8di) _mm512_and_epi64(__a, __b),
-                (__v8di) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_si512 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v16su)__A & (__v16su)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_andnot_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_andnot_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a | (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                             (__v16si)_mm512_or_epi32(__a, __b),
-                                             (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_or_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a ^ (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                            (__v16si)_mm512_xor_epi32(__a, __b),
-                                            (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_xor_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
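-
-/* Illustrative note (not part of the original header): the epi32/epi64
-   variants exist so per-element write masks can be applied; the si512 forms
-   are the unmasked whole-register operations.  A minimal sketch, given
-   hypothetical __m512i values src, a, and b:
-
-     // AND the low 8 dwords of a and b; keep src in the other lanes.
-     __m512i r = _mm512_mask_and_epi32(src, (__mmask16)0x00FF, a, b);
-*/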
-
-/* Arithmetic */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_add_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a + (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_add_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a + (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mul_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a * (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mul_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a * (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_sub_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a - (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_sub_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a - (__v16sf)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A + (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A - (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A + (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A - (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
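-
-/* Illustrative note (not part of the original header): "mask" variants merge
-   results into an existing vector while "maskz" variants zero the disabled
-   lanes.  A minimal sketch, given hypothetical __m512i values w, a, and b:
-
-     __mmask16 k = 0x5555;                            // every even lane
-     __m512i m = _mm512_mask_add_epi32(w, k, a, b);   // odd lanes keep w
-     __m512i z = _mm512_maskz_add_epi32(k, a, b);     // odd lanes become 0
-*/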
-
-#define _mm512_max_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_max_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)(W))
-
-#define _mm512_maskz_max_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_max_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_max_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_max_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W))
-
-#define _mm512_maskz_max_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_max_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_max_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_max_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_max_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_max_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_max_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_max_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_max_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_max_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
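-
-/* Illustrative note (not part of the original header): the scalar _ss/_sd
-   forms operate on element 0 only and copy the remaining elements from the
-   first source.  A minimal sketch, given hypothetical __m128d values a and b:
-
-     // r[0] = max(a[0], b[0]); r[1] = a[1].
-     __m128d r = _mm_max_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
-*/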
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_min_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_min_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)(W))
-
-#define _mm512_maskz_min_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_min_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_min_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_min_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W))
-
-#define _mm512_maskz_min_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_min_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_min_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_min_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_min_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_min_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_min_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_min_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_min_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_min_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epu32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A * (__v16su) __B);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullox_epi64 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v8du) __A * (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_mullox_epi64(__A, __B),
-                                             (__v8di)__W);
-}
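-
-/* Illustrative note (not part of the original header): mul_epi32/mul_epu32
-   are widening multiplies -- each takes the even-indexed 32-bit elements of
-   its sources and produces eight full 64-bit products -- whereas mullo_epi32
-   keeps only the low 32 bits of all sixteen products.  A minimal sketch,
-   given hypothetical __m512i values x and y:
-
-     __m512i wide = _mm512_mul_epu32(x, y);    // 8 x 64-bit products
-     __m512i low  = _mm512_mullo_epi32(x, y);  // 16 x truncated products
-*/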
-
-#define _mm512_sqrt_round_pd(A, R) \
-  (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))
-
-#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_sqrt_round_pd(U, A, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_sqrt_pd(__m512d __A)
-{
-  return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_sqrt_round_ps(A, R) \
-  (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))
-
-#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_sqrt_round_ps(U, A, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_sqrt_ps(__m512 __A)
-{
-  return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                 (__v8df)
-                 _mm512_setzero_pd (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-             (__v4sf) __B,
-             (__v4sf)
-             _mm_setzero_ps (),
-             (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-              (__v2df) __B,
-              (__v2df)
-              _mm_setzero_pd (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rcp14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-               (__v8df)
-               _mm512_setzero_pd (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rcp14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-              (__v16sf)
-              _mm512_setzero_ps (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf)
-                   _mm512_setzero_ps (),
-                   (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rcp14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-                 (__v4sf) __B,
-                 (__v4sf)
-                 _mm_setzero_ps (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rcp14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-            (__v2df) __B,
-            (__v2df)
-            _mm_setzero_pd (),
-            (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_floor_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_FLOOR,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_FLOOR,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_floor_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_FLOOR,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_FLOOR,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_CEIL,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_ceil_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_CEIL,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_ceil_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_CEIL,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_CEIL,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi64(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi32(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_add_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_add_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_add_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_add_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_add_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_add_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_add_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_add_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_add_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_add_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_add_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_add_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_add_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_add_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_add_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_add_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_sub_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_sub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_sub_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_sub_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_sub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_sub_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_sub_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_sub_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_sub_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_sub_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mul_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_mul_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_mul_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_mul_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mul_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_mul_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_mul_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_mul_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_mul_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_mul_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_mul_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_mul_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_div_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_div_ss(__mmask8 __U, __m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_div_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_div_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_div_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_div_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_div_sd(__mmask8 __U, __m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_div_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_div_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_div_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_div_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a/(__v8df)__b);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_div_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a/(__v16sf)__b);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_div_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_div_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_div_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_div_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_div_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_div_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-#define _mm512_roundscale_ps(A, B) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
-                                         (__v16sf)_mm512_undefined_ps(), \
-                                         (__mmask16)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_ps(A, B, C, imm) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_roundscale_ps(A, B, imm) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(A), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         (int)(R))
-
-#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(A), (int)(R))
-
-#define _mm512_roundscale_round_ps(A, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                         (__v16sf)_mm512_undefined_ps(), \
-                                         (__mmask16)-1, (int)(R))
-
-#define _mm512_roundscale_pd(A, B) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
-                                          (__v8df)_mm512_undefined_pd(), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_pd(A, B, C, imm) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_roundscale_pd(A, B, imm) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(A), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          (int)(R))
-
-#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(A), (int)(R))
-
-#define _mm512_roundscale_round_pd(A, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                          (__v8df)_mm512_undefined_pd(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm512_fmadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fmsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fnmadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fnmsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fmsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fnmadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fnmsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8df)(__m512d)(C), \
-                                              (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8df)(__m512d)(C), \
-                                              (__mmask8)(U), (int)(R))
-
-
-#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fmsubadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              -(__v8df)(__m512d)(C), \
-                                              (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              -(__v8df)(__m512d)(C), \
-                                              (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) -1,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                        (__v8df) __B,
-                                                        -(__v8df) __C,
-                                                        (__mmask8) __U,
-                                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16sf)(__m512)(C), \
-                                             (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16sf)(__m512)(C), \
-                                             (__mmask16)(U), (int)(R))
-
-
-#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fmsubadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             -(__v16sf)(__m512)(C), \
-                                             (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             -(__v16sf)(__m512)(C), \
-                                             (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       -(__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
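-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: fmaddsub alternates per lane (even lanes compute
- * a*b - c, odd lanes a*b + c) and fmsubadd mirrors that, the usual
- * building block for interleaved complex multiply-accumulate. */
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_example_addsub_pd(__m512d __A, __m512d __B)
-{
-  /* With a multiplier of 1.0 the FMA degenerates to a lane-alternating
-   * add/sub: even lanes __A - __B, odd lanes __A + __B. */
-  return _mm512_fmaddsub_pd(__A, _mm512_set1_pd(1.0), __B);
-}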
-
-#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           -(__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           -(__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
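-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: across the FMA family above, the _mask form blends the
- * result into the first operand under the write-mask, _mask3 blends into
- * the third (addend) operand, and _maskz zeroes the deselected lanes. */
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_example_masked_fma_ps(__mmask16 __K, __m512 __A, __m512 __B, __m512 __C)
-{
-  /* Lanes selected by __K receive __A*__B + __C; the rest keep __A. */
-  return _mm512_mask_fmadd_ps(__A, __K, __B, __C);
-}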
-
-/* Vector permutations */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I,
-                                                (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I,
-                                                (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)_mm512_setzero_si512());
-}
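-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: each lane of the index operand selects one of the 32
- * dwords in the concatenation of the two sources; index bits [3:0] pick
- * the lane, bit 4 picks the second source. */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_example_reverse_epi32(__m512i __A)
-{
-  /* Indices 15..0 reverse the sixteen dwords of __A. */
-  __m512i __idx = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
-                                   8, 9, 10, 11, 12, 13, 14, 15);
-  return _mm512_permutex2var_epi32(__A, __idx, __A);
-}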
-
-#define _mm512_alignr_epi64(A, B, I) \
-  (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
-                                    (__v8di)(__m512i)(B), (int)(I))
-
-#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                 (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                 (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                 (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                 (__v8di)_mm512_setzero_si512())
-
-#define _mm512_alignr_epi32(A, B, I) \
-  (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
-                                    (__v16si)(__m512i)(B), (int)(I))
-
-#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                (__v16si)_mm512_setzero_si512())
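-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: valignd/valignq concatenate A above B, shift the pair
- * right by I lanes, and keep the low half, so aligning a register
- * against itself yields a lane rotation. */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_example_rotate_down_one_epi32(__m512i __V)
-{
-  /* Lane j receives lane (j + 1) mod 16 of __V. */
-  return _mm512_alignr_epi32(__V, __V, 1);
-}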
-
-/* Vector Extract */
-
-#define _mm512_extractf64x4_pd(A, I) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-                                            (__v4df)_mm256_undefined_pd(), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                            (__v4df)(__m256d)(W), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                            (__v4df)_mm256_setzero_pd(), \
-                                            (__mmask8)(U))
-
-#define _mm512_extractf32x4_ps(A, I) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
-                                           (__v4sf)_mm_undefined_ps(), \
-                                           (__mmask8)-1)
-
-#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v4sf)(__m128)(W), \
-                                           (__mmask8)(U))
-
-#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U))
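-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: the immediate names which 256-bit half (f64x4) or
- * 128-bit quarter (f32x4) of the source is extracted. */
-static __inline__ __m256d __DEFAULT_FN_ATTRS512
-_example_upper_half_pd(__m512d __V)
-{
-  /* I == 1 selects elements 4..7, the upper 256 bits of __V. */
-  return _mm512_extractf64x4_pd(__V, 1);
-}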
-
-/* Vector Blend */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-                 (__v8df) __W,
-                 (__v8df) __A);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-                (__v16sf) __W,
-                (__v16sf) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                (__v8di) __W,
-                (__v8di) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                (__v16si) __W,
-                (__v16si) __A);
-}
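-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: in the blends above a set mask bit selects the lane
- * from __W, a clear bit the lane from __A. */
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_example_merge_halves_ps(__m512 __A, __m512 __W)
-{
-  /* Low eight lanes come from __A, high eight from __W. */
-  return _mm512_mask_blend_ps((__mmask16)0xFF00, __A, __W);
-}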
-
-/* Compare */
-
-#define _mm512_cmp_round_ps_mask(A, B, P, R) \
-  (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), (int)(P), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
-  (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), (int)(P), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_cmp_ps_mask(A, B, P) \
-  _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
-
-#define _mm512_cmp_round_pd_mask(A, B, P, R) \
-  (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), (int)(P), \
-                                         (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
-  (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), (int)(P), \
-                                         (__mmask8)(U), (int)(R))
-
-#define _mm512_cmp_pd_mask(A, B, P) \
-  _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q)
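-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: the comparisons return a bit-mask rather than a
- * vector, so a result can feed a masked operation or a plain population
- * count. */
-static __inline__ int __DEFAULT_FN_ATTRS512
-_example_count_nonpositive_pd(__m512d __V)
-{
-  __mmask8 __k = _mm512_cmple_pd_mask(__V, _mm512_setzero_pd());
-  return __builtin_popcount((unsigned)__k);
-}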
-
-/* Conversion */
-
-#define _mm512_cvtt_roundps_epu32(A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_undefined_epi32(), \
-                                             (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R))
-
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epu32(__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) __W,
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) _mm512_setzero_si512 (),
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepi32_ps(A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_cvt_roundepu32_ps(A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16su)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16si)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-#define _mm512_cvt_roundpd_ps(A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_ps (__m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_undefined_ps (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_pslo (__m512d __A)
-{
-  return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
-{
-  return (__m512) __builtin_shufflevector (
-                (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W),
-                                               __U, __A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-#define _mm512_cvt_roundps_ph(A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)_mm256_undefined_si256(), \
-                                            (__mmask16)-1)
-
-#define _mm512_mask_cvt_roundps_ph(W, U, A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)(__m256i)(W), \
-                                            (__mmask16)(U))
-
-#define _mm512_maskz_cvt_roundps_ph(U, A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)_mm256_setzero_si256(), \
-                                            (__mmask16)(U))
-
-#define _mm512_cvtps_ph       _mm512_cvt_roundps_ph
-#define _mm512_mask_cvtps_ph  _mm512_mask_cvt_roundps_ph
-#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
-
-#define _mm512_cvt_roundph_ps(A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
-
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtph_ps(__m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) _mm512_setzero_ps (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
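-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: ps <-> ph conversions round-trip through __m256i, and
- * the immediate controls rounding only on the narrowing direction (the
- * widening conversion is exact). */
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_example_halve_precision_ps(__m512 __V)
-{
-  /* Narrow to sixteen half-floats (round to nearest even), widen back. */
-  __m256i __h = _mm512_cvtps_ph(__V, _MM_FROUND_TO_NEAREST_INT);
-  return _mm512_cvtph_ps(__h);
-}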
-
-#define _mm512_cvtt_roundpd_epi32(A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R))
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epi32(__m512d __a)
-{
-  return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
-                                                   (__v8si)_mm256_setzero_si256(),
-                                                   (__mmask8) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundps_epi32(A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epi32(__m512 __a)
-{
-  return (__m512i)
-    __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
-                                     (__v16si) _mm512_setzero_si512 (),
-                                     (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epi32(A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)(__m512i)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)(U), (int)(R))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epi32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) _mm512_undefined_epi32 (),
-                 (__mmask16) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si)
-                 _mm512_setzero_si512 (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epi32(A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)(__m256i)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epi32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si)
-                 _mm256_undefined_si256 (),
-                 (__mmask8) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si) __W,
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epu32(A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epu32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_undefined_epi32 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epu32(A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_cvtsd_f64(__m512d __a)
-{
-  return __a[0];
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_cvtss_f32(__m512 __a)
-{
-  return __a[0];
-}
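-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: the cvtt forms truncate toward zero while the cvt
- * forms round under the current MXCSR mode, so the difference below is
- * nonzero exactly where rounding moved a value. */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_example_round_vs_truncate_ps(__m512 __V)
-{
-  __m512i __trunc = _mm512_cvttps_epi32(__V);  /* e.g. 2.7f -> 2 */
-  __m512i __round = _mm512_cvtps_epi32(__V);   /* e.g. 2.7f -> 3 */
-  return _mm512_sub_epi32(__round, __trunc);
-}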
-
-/* Unpack and Interleave */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
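-
-/* Illustrative sketch -- the _example_* helper below is hypothetical, not
- * an upstream API: these unpacks interleave within each 128-bit lane, as
- * the shuffle indices above spell out. */
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_example_duplicate_even_pd(__m512d __V)
-{
-  /* Unpacking __V with itself duplicates the even element of every
-   * 128-bit lane: v0,v0, v2,v2, v4,v4, v6,v6. */
-  return _mm512_unpacklo_pd(__V, __V);
-}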
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         2,    18,    3,    19,
-                                         2+4,  18+4,  3+4,  19+4,
-                                         2+8,  18+8,  3+8,  19+8,
-                                         2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         0,    16,    1,    17,
-                                         0+4,  16+4,  1+4,  17+4,
-                                         0+8,  16+8,  1+8,  17+8,
-                                         0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          2,    18,    3,    19,
-                                          2+4,  18+4,  3+4,  19+4,
-                                          2+8,  18+8,  3+8,  19+8,
-                                          2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          0,    16,    1,    17,
-                                          0+4,  16+4,  1+4,  17+4,
-                                          0+8,  16+8,  1+8,  17+8,
-                                          0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
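-/* The unpack family above interleaves within each 128-bit lane, not across
-   the whole register; the shuffle indices 0,8,2,10,... / 1,9,3,11,... encode
-   exactly that.  Illustrative element order for the epi64 case, with
-   a = {a0..a7} and b = {b0..b7}:
-
-     _mm512_unpacklo_epi64(a, b) -> {a0,b0, a2,b2, a4,b4, a6,b6}
-     _mm512_unpackhi_epi64(a, b) -> {a1,b1, a3,b3, a5,b5, a7,b7}
-*/
-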
-/* SIMD load ops */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_si512 (void const *__P)
-{
-  struct __loadu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si512*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
-                                                     (__v16si)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask16) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
-                                                     (__v8di)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_loadu_pd(void const *__p)
-{
-  struct __loadu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_loadu_ps(void const *__p)
-{
-  struct __loadu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ps*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_load_ps(void const *__p)
-{
-  return *(const __m512*)__p;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_load_pd(void const *__p)
-{
-  return *(const __m512d*)__p;
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
-                          (__v8df) __W,
-                          (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_si512 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi32 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi64 (void const *__P)
-{
-  return *(const __m512i *) __P;
-}
-
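-/* The __loadu_* structs above are the usual C idiom for an unaligned,
-   aliasing-safe load: __attribute__((__packed__)) drops the alignment
-   requirement and __may_alias__ exempts the access from strict-aliasing
-   rules.  A hedged sketch of the resulting contract (illustrative names):
-
-     double buf[8];                       // no particular alignment
-     __m512d u = _mm512_loadu_pd(buf);    // always valid
-     __m512d a = _mm512_load_pd(buf);     // valid only if buf is 64-byte
-                                          // aligned
-*/
-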
-/* SIMD store ops */
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi64 (void *__P, __m512i __A)
-{
-  struct __storeu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
-                                     (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_si512 (void *__P, __m512i __A)
-{
-  struct __storeu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si512*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi32 (void *__P, __m512i __A)
-{
-  struct __storeu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
-                                     (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_pd(void *__P, __m512d __A)
-{
-  struct __storeu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_ps(void *__P, __m512 __A)
-{
-  struct __storeu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_pd(void *__P, __m512d __A)
-{
-  *(__m512d*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_ps(void *__P, __m512 __A)
-{
-  *(__m512*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_si512 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi32 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi64 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
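-/* Masked stores write only the lanes selected by the mask and leave the
-   rest of memory untouched, which makes them convenient for array tails.
-   Illustrative sketch (placeholder names):
-
-     float out[16];
-     __m512 v = _mm512_set1_ps(1.0f);
-     _mm512_mask_storeu_ps(out, (__mmask16)0x00FF, v);  // writes out[0..7]
-*/
-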
-/* Mask ops */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS
-_mm512_knot(__mmask16 __M)
-{
-  return __builtin_ia32_knothi(__M);
-}
-
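-/* __mmask16 is an ordinary 16-bit integer type, so _mm512_knot composes
-   with plain C bitwise operators; sketch:
-
-     __mmask16 k  = 0x0F0F;
-     __mmask16 nk = _mm512_knot(k);   // 0xF0F0, same as (__mmask16)~k
-*/
-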
-/* Integer compare */
-
-#define _mm512_cmpeq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-
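-/* Each comparison above produces a bitmask, not a vector of lanes; the
-   mask then drives a blend, move, or store.  A hedged sketch (placeholder
-   names, assumes AVX-512F):
-
-     __m512i a    = _mm512_set1_epi32(1);
-     __m512i b    = _mm512_set1_epi32(2);
-     __mmask16 lt = _mm512_cmplt_epi32_mask(a, b);  // 0xFFFF here
-     __m512i r    = _mm512_mask_mov_epi32(a, lt, b); // take b where a < b
-*/
-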
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi32(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a
-     vector of char, and plain char may be signed or unsigned, so use
-     __v16qs. */
-  return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi64(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a
-     vector of char, and plain char may be signed or unsigned, so use
-     __v16qs. */
-  return (__m512i)__builtin_convertvector(
-      __builtin_shufflevector((__v16qs)__A, (__v16qs)__A,
-                              0, 1, 2, 3, 4, 5, 6, 7), __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi32(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector(
-      __builtin_shufflevector((__v16qu)__A, (__v16qu)__A,
-                              0, 1, 2, 3, 4, 5, 6, 7), __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
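-/* The cvtepi8/16/32 and cvtepu8/16/32 conversions above widen by sign- or
-   zero-extension respectively; the difference is visible on negative
-   inputs.  Sketch (placeholder names):
-
-     __m128i bytes = _mm_set1_epi8(-1);
-     __m512i s = _mm512_cvtepi8_epi32(bytes);  // each lane = -1
-     __m512i u = _mm512_cvtepu8_epi32(bytes);  // each lane = 255
-*/
-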
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-
-#define _mm512_cmp_epi32_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (int)(p), \
-                                         (__mmask16)-1)
-
-#define _mm512_cmp_epu32_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)-1)
-
-#define _mm512_cmp_epi64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (int)(p), \
-                                        (__mmask8)-1)
-
-#define _mm512_cmp_epu64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)-1)
-
-#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (int)(p), \
-                                         (__mmask16)(m))
-
-#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)(m))
-
-#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (int)(p), \
-                                        (__mmask8)(m))
-
-#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)(m))
-
-#define _mm512_rol_epi32(a, b) \
-  (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))
-
-#define _mm512_mask_rol_epi32(W, U, a, b) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_rol_epi32((a), (b)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_rol_epi32(U, a, b) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_rol_epi32((a), (b)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-#define _mm512_rol_epi64(a, b) \
-  (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))
-
-#define _mm512_mask_rol_epi64(W, U, a, b) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_rol_epi64((a), (b)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_rol_epi64(U, a, b) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_rol_epi64((a), (b)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_ror_epi32(A, B) \
-  (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))
-
-#define _mm512_mask_ror_epi32(W, U, A, B) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_ror_epi32((A), (B)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_ror_epi32(U, A, B) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_ror_epi32((A), (B)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-#define _mm512_ror_epi64(A, B) \
-  (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))
-
-#define _mm512_mask_ror_epi64(W, U, A, B) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_ror_epi64((A), (B)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_ror_epi64(U, A, B) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_ror_epi64((A), (B)), \
-                                      (__v8di)_mm512_setzero_si512())
-
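-/* rol/ror rotate rather than shift, so bits wrap around instead of being
-   discarded; sketch:
-
-     __m512i v = _mm512_set1_epi32((int)0x80000001);
-     __m512i l = _mm512_rol_epi32(v, 1);   // each lane -> 0x00000003
-     __m512i r = _mm512_ror_epi32(v, 1);   // each lane -> 0xC0000000
-*/
-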
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
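-/* Unlike scalar C shifts, slli/srli with a count of at least the element
-   width are well defined: the lane becomes zero.  Sketch:
-
-     __m512i v = _mm512_set1_epi32(1);
-     __m512i a = _mm512_slli_epi32(v, 4);    // each lane -> 16
-     __m512i z = _mm512_srli_epi32(v, 32);   // each lane -> 0
-*/
-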
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
-          (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_movedup_pd (__m512d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
-                                          0, 0, 2, 2, 4, 4, 6, 6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
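-/* movedup duplicates the even-indexed double of every pair, as the shuffle
-   indices 0,0,2,2,... above encode.  With a = {a0..a7}:
-
-     _mm512_movedup_pd(a) -> {a0,a0, a2,a2, a4,a4, a6,a6}
-*/
-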
-#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U), (int)(R))
-
-#define _mm512_fixupimm_pd(A, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1, \
-                                             _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), \
-                                              (int)(imm), (__mmask8)(U), \
-                                              (int)(R))
-
-#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), \
-                                              (int)(imm), (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U), (int)(R))
-
-#define _mm512_fixupimm_ps(A, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1, \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U), \
-                                             (int)(R))
-
-#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U), \
-                                             _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R))
-
-#define _mm_fixupimm_sd(A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)-1, (int)(R))
-
-#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)(U), (int)(R))
-
-#define _mm_fixupimm_ss(A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_getexp_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_getexp_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-                 (__v2df) __B,
-                 (__v2df) _mm_setzero_pd (),
-                 (__mmask8) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(R))
-
-#define _mm_getexp_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_getexp_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(R))
-
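-/* getexp extracts the unbiased exponent as a floating-point value, i.e.
-   floor(log2(|x|)) for normal inputs.  Scalar forms operate on the low
-   lane of the second operand and copy the upper lane(s) from the first.
-   Sketch (placeholder names):
-
-     __m128d a = _mm_setzero_pd();
-     __m128d b = _mm_set_sd(8.0);
-     __m128d e = _mm_getexp_sd(a, b);   // low lane -> 3.0, high lane from a
-*/
-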
-#define _mm_getmant_round_sd(A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, (int)(R))
-
-#define _mm_getmant_sd(A, B, C, D)  \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_getmant_sd(U, A, B, C, D) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R))
-
-#define _mm_getmant_round_ss(A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, (int)(R))
-
-#define _mm_getmant_ss(A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_getmant_ss(U, A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), (int)(R))
-
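-/* For the getmant family, C selects the normalization interval
-   (_MM_MANT_NORM_1_2, _MM_MANT_NORM_p5_2, _MM_MANT_NORM_p5_1,
-   _MM_MANT_NORM_p75_1p5) and D the sign control (_MM_MANT_SIGN_src,
-   _MM_MANT_SIGN_zero, _MM_MANT_SIGN_nan); the macros pack them into the
-   immediate byte as (D << 2) | C.  A minimal sketch (illustrative values):
-
-     __m128d x = _mm_set_sd(24.0);                       // 1.5 * 2^4
-     __m128d m = _mm_getmant_sd(x, x, _MM_MANT_NORM_1_2,
-                                _MM_MANT_SIGN_src);      // low lane = 1.5
-*/
-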
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kmov (__mmask16 __A)
-{
-  return  __A;
-}
-
-#define _mm_comi_round_sd(A, B, P, R) \
-  (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
-                              (int)(P), (int)(R))
-
-#define _mm_comi_round_ss(A, B, P, R) \
-  (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
-                              (int)(P), (int)(R))
-
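-/* _mm_comi_round_{sd,ss} compare the low elements under predicate P (one
-   of the _CMP_* predicates) and return the result as 0 or 1; R chooses
-   between _MM_FROUND_CUR_DIRECTION and _MM_FROUND_NO_EXC
-   (suppress exceptions). */
-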
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_si64(A, R) \
-  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sll_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sll_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
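-/* In the shift group above, _mm512_{sll,srl,sra}_epi{32,64} take a single
-   count from the low 64 bits of an XMM operand and apply it to every
-   element, while the _{sllv,srlv,srav}_ forms shift each element by the
-   count in the corresponding element of the second vector.  Counts at or
-   above the element width yield zero for logical shifts and sign-fill for
-   arithmetic ones.  A minimal sketch (illustrative values):
-
-     __m512i v = _mm512_set1_epi32(-8);
-     __m512i r = _mm512_sra_epi32(v, _mm_cvtsi32_si128(2));  // lanes = -2
-*/
-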
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                            (__v16si)(__m512i)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1)
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                            (__v16si)(__m512i)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                            (__v8di)(__m512i)(B), \
-                                            (__v8di)(__m512i)(C), (int)(imm), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                            (__v8di)(__m512i)(B), \
-                                            (__v8di)(__m512i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U))
-
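-/* The ternarylogic immediate is an 8-entry truth table: bit
-   ((a << 2) | (b << 1) | c) of imm is the result bit for input bits
-   a, b, c taken from A, B and C respectively.  For example 0x96 computes
-   A ^ B ^ C and 0xE8 the bitwise majority of the three:
-
-     __m512i x = _mm512_ternarylogic_epi32(a, b, c, 0x96);  // a ^ b ^ c
-*/
-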
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_i64(A, R) \
-  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
-#endif
-
-#define _mm_cvt_roundsd_si32(A, R) \
-  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvt_roundsd_i32(A, R) \
-  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvt_roundsd_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
-                                                  (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvt_roundss_si32(A, R) \
-  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvt_roundss_i32(A, R) \
-  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_si64(A, R) \
-  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvt_roundss_i64(A, R) \
-  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
-#endif
-
-#define _mm_cvt_roundss_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
-                                                  (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_i32(A, R) \
-  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvtt_roundsd_si32(A, R) \
-  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i32 (__m128d __A)
-{
-  return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_si64(A, R) \
-  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvtt_roundsd_i64(A, R) \
-  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i64 (__m128d __A)
-{
-  return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
-                                                   (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_i32(A, R) \
-  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvtt_roundss_si32(A, R) \
-  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttss_i32 (__m128 __A)
-{
-  return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_i64(A, R) \
-  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvtt_roundss_si64(A, R) \
-  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_i64 (__m128 __A)
-{
-  return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
-                                                   (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
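-/* The _mm_cvt_round* conversions above honour the rounding mode passed in
-   R (e.g. _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC), whereas the
-   _mm_cvtt_round* forms always truncate toward zero; for those, R only
-   chooses between _MM_FROUND_CUR_DIRECTION and _MM_FROUND_NO_EXC
-   (suppress exceptions). */
-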
-#define _mm512_permute_pd(X, C) \
-  (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))
-
-#define _mm512_mask_permute_pd(W, U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permute_pd((X), (C)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_permute_pd(U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permute_pd((X), (C)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_permute_ps(X, C) \
-  (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))
-
-#define _mm512_mask_permute_ps(W, U, X, C) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_permute_ps((X), (C)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_permute_ps(U, X, C) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_permute_ps((X), (C)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutevar_pd(__m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutevar_ps(__m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I,
-                                                 (__v8df)__B);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)(__m512d)__I);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I,
-                                                (__v16sf) __B);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)(__m512)__I);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)_mm512_setzero_ps());
-}
-
-
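-/* permutex2var treats A and B as one concatenated 2N-element table: the
-   low bits of each index select an element and the next bit selects A (0)
-   or B (1).  The _mask form keeps masked-off lanes from A; mask2 keeps
-   them from the index vector I reinterpreted as floating point.  A sketch
-   (illustrative values):
-
-     __m512i idx = _mm512_set1_epi64(8);              // bit 3 set: B[0]
-     __m512d r   = _mm512_permutex2var_pd(a, idx, b); // every lane = b[0]
-*/
-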
-#define _mm512_cvtt_roundpd_epu32(A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_undefined_si256(), \
-                                             (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_roundscale_round_sd(A, B, imm, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                (int)(R))
-
-#define _mm_roundscale_sd(A, B, imm) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R))
-
-#define _mm_maskz_roundscale_sd(U, A, B, I) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R))
-
-#define _mm_roundscale_round_ss(A, B, imm, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(imm), \
-                                               (int)(R))
-
-#define _mm_roundscale_ss(A, B, imm) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(imm), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_ss(W, U, A, B, I) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(I), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(I), \
-                                               (int)(R))
-
-#define _mm_maskz_roundscale_ss(U, A, B, I) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(I), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(I), \
-                                               (int)(R))
-
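-/* For roundscale, bits [7:4] of the immediate give the number of fraction
-   bits M to keep (the result is 2^-M * round(2^M * x)), bits [1:0] select
-   the rounding mode unless bit 2 is set (then MXCSR is used), and bit 3
-   suppresses precision exceptions; an immediate of 0 thus rounds to the
-   nearest integer. */
-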
-#define _mm512_scalef_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_scalef_pd (__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df)
-                _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_scalef_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_scalef_ps (__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf)
-               _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf)
-               _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_scalef_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)-1, (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_scalef_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-              (__v2df) __B, (__v2df) _mm_setzero_pd(),
-              (__mmask8) -1,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U), (int)(R))
-
-#define _mm_scalef_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)-1, (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_scalef_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-             (__v4sf) __B, (__v4sf) _mm_setzero_ps(),
-             (__mmask8) -1,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U), \
-                                             (int)(R))
-
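-/* scalef computes A * 2^floor(B) per element, the recomposition step
-   matching getexp/getmant above.  A minimal sketch (illustrative values):
-
-     __m128d r = _mm_scalef_sd(_mm_set_sd(1.5), _mm_set_sd(3.0)); // 12.0
-*/
-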
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
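-/* The srai forms take the count as an immediate; as with the
-   register-count arithmetic shifts above, counts of 32 (or 64) and up
-   replicate the sign bit across the whole element. */
-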
-#define _mm512_shuffle_f32x4(A, B, imm) \
-  (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
-                                    (__v16sf)(__m512)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-#define _mm512_shuffle_f64x2(A, B, imm) \
-  (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
-                                     (__v8df)(__m512d)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_shuffle_i32x4(A, B, imm) \
-  (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-#define _mm512_shuffle_i64x2(A, B, imm) \
-  (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-#define _mm512_shuffle_pd(A, B, M) \
-  (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(M))
-
-#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_shuffle_pd(U, A, B, M) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_shuffle_ps(A, B, M) \
-  (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(M))
-
-#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_shuffle_ps(U, A, B, M) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
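-/* shuffle_{f,i}{32x4,64x2} move whole 128-bit lanes: successive 2-bit
-   fields of the immediate pick the source lane, the low half of the
-   result coming from A and the high half from B, while shuffle_ps/pd
-   keep the classic SSE per-element encoding.  A sketch (illustrative
-   values):
-
-     // Result lanes A0, A1, B2, B3: fields (0,1,2,3) encode as 0xE4.
-     __m512 r = _mm512_shuffle_f32x4(a, b, 0xE4);
-*/
-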
-#define _mm_sqrt_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_sqrtsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_sqrtsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm_sqrt_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x4(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
-                                         0, 1, 2, 3, 0, 1, 2, 3,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)__O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f64x4(__m256d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)__O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x4(__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i64x4(__m256i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
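-/* The broadcast_{f,i}{32x4,64x4} intrinsics are written as plain
-   __builtin_shufflevector replications, leaving the compiler free to
-   emit whichever vbroadcast/vshuf sequence is cheapest. */
-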
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) __O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) _mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) __O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) _mm512_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_undefined_si256 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_undefined_si256 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_undefined_si256 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_undefined_si256 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_undefined_si256 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_undefined_si256 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-#define _mm512_extracti32x4_epi32(A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)_mm_undefined_si128(), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)(__m128i)(W), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)_mm_setzero_si128(), \
-                                            (__mmask8)(U))
-
-#define _mm512_extracti64x4_epi64(A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)_mm256_undefined_si256(), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)(__m256i)(W), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)_mm256_setzero_si256(), \
-                                            (__mmask8)(U))
-
-#define _mm512_insertf64x4(A, B, imm) \
-  (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
-                                      (__v4df)(__m256d)(B), (int)(imm))
-
-#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                  (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                  (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_insertf64x4(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                  (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                  (__v8df)_mm512_setzero_pd())
-
-#define _mm512_inserti64x4(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
-                                      (__v4di)(__m256i)(B), (int)(imm))
-
-#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                  (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_inserti64x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                  (__v8di)_mm512_setzero_si512())
-
-#define _mm512_insertf32x4(A, B, imm) \
-  (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
-                                     (__v4sf)(__m128)(B), (int)(imm))
-
-#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                 (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                 (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_insertf32x4(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                 (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                 (__v16sf)_mm512_setzero_ps())
-
-#define _mm512_inserti32x4(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
-                                      (__v4si)(__m128i)(B), (int)(imm))
-
-#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                 (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_inserti32x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                 (__v16si)_mm512_setzero_si512())
-
-#define _mm512_getmant_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_getmant_pd(A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)-1, \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_getmant_pd(W, U, A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_getmant_pd(U, A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_getmant_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_getmant_ps(A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_getmant_ps(W, U, A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_getmant_ps(U, A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_getexp_round_pd(A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_getexp_round_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_getexp_round_pd(U, A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_getexp_pd (__m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_getexp_round_ps(A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_getexp_round_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_getexp_round_ps(U, A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_getexp_ps (__m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_i64gather_ps(index, addr, scale) \
-  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)-1, (int)(scale))
-
-#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_pd(index, addr, scale) \
-  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i32gather_ps(index, addr, scale) \
-  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
-                                       (void const *)(addr), \
-                                       (__v16si)(__m512)(index), \
-                                       (__mmask16)-1, (int)(scale))
-
-#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v16si)(__m512)(index), \
-                                       (__mmask16)(mask), (int)(scale))
-
-#define _mm512_i32gather_epi32(index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)-1, (int)(scale))
-
-#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)(mask), (int)(scale))
-
-#define _mm512_i32gather_pd(index, addr, scale) \
-  (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i32gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        (__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        (__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        -(__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), \
-                                        -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        -(__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         -(__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), \
-                                         -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         -(__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), \
-                                          (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), \
-                                          (__mmask8)(U), (int)(R))
-
-#define _mm512_permutex_pd(X, C) \
-  (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))
-
-#define _mm512_mask_permutex_pd(W, U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permutex_pd((X), (C)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_permutex_pd(U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permutex_pd((X), (C)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_permutex_epi64(X, C) \
-  (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))
-
-#define _mm512_mask_permutex_epi64(W, U, X, C) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_permutex_epi64(U, X, C) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X);
-}
-
-#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)__W);
-}
-
-#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
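/* Usage sketch (helper name illustrative; assumes <immintrin.h> and an
 * AVX512F target): permutexvar gathers result[i] = src[idx[i]], so a
 * descending index vector reverses the lanes. */
static inline __m512i reverse_epi32(__m512i v) {
  const __m512i rev = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                                        7, 6, 5, 4, 3, 2, 1, 0);
  return _mm512_permutexvar_epi32(rev, v);
}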
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kand (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kandn (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestc (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestz (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
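/* Usage sketch (illustrative; assumes an AVX512F target): kortestz reports
 * whether the OR of two masks is all zeros, kortestc whether it is all ones. */
static inline void kortest_demo(void) {
  __mmask16 a = 0x00FF, b = 0xFF00;
  int all_ones = _mm512_kortestc(a, b);  /* 1: a | b == 0xFFFF */
  int any_set  = !_mm512_kortestz(a, b); /* 1: a | b != 0 */
  (void)all_ones; (void)any_set;
}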
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxnor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-#define _kand_mask16 _mm512_kand
-#define _kandn_mask16 _mm512_kandn
-#define _knot_mask16 _mm512_knot
-#define _kor_mask16 _mm512_kor
-#define _kxnor_mask16 _mm512_kxnor
-#define _kxor_mask16 _mm512_kxor
-
-#define _kshiftli_mask16(A, I) \
-  (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))
-
-#define _kshiftri_mask16(A, I) \
-  (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask16_u32(__mmask16 __A) {
-  return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_cvtu32_mask16(unsigned int __A) {
-  return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_load_mask16(__mmask16 *__A) {
-  return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask16(__mmask16 *__A, __mmask16 __B) {
-  *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B);
-}
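/* Usage sketch (helper name illustrative): a mask register can be spilled to
 * memory and reloaded with the KMOVW-backed helpers above. */
static inline __mmask16 roundtrip_mask16(__mmask16 m) {
  __mmask16 slot;
  _store_mask16(&slot, m);    /* KMOVW to memory */
  return _load_mask16(&slot); /* KMOVW from memory */
}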
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_si512 (void * __P, __m512i __A)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_stream_load_si512 (void const *__P)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_pd (void *__P, __m512d __A)
-{
-  typedef __v8df __v8df_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_ps (void *__P, __m512 __A)
-{
-  typedef __v16sf __v16sf_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P);
-}
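/* Usage sketch (helper name illustrative): the non-temporal stores above
 * bypass the cache and require a 64-byte-aligned destination, as the aligned
 * typedefs they use make explicit. */
static inline void stream_fill_ps(float *dst64 /* 64-byte aligned */, float v) {
  _mm512_stream_ps(dst64, _mm512_set1_ps(v));
}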
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di)
-                  _mm512_setzero_si512 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) __U);
-}
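/* Usage sketch (helper name illustrative): compress packs the mask-selected
 * elements into the low lanes; the zeroing form clears the rest. */
static inline __m512i keep_even_lanes(__m512i v) {
  /* 0x5555 selects lanes 0, 2, ..., 14; they land in lanes 0..7. */
  return _mm512_maskz_compress_epi32(0x5555, v);
}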
-
-#define _mm_cmp_round_ss_mask(X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)(M), (int)(R))
-
-#define _mm_cmp_ss_mask(X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)-1, \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)(M), \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_cmp_round_sd_mask(X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)(M), (int)(R))
-
-#define _mm_cmp_sd_mask(X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)-1, \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)(M), \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-/* Bit Test */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_test_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi32_mask (_mm512_and_epi32(__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_test_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
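/* Usage sketch (helper name illustrative): per lane, test sets the mask bit
 * when (a & b) != 0 and testn when (a & b) == 0, so the two masks are exact
 * complements for any inputs. */
static inline __mmask16 test_vs_testn(__m512i a, __m512i b) {
  return _mm512_test_epi32_mask(a, b) ^ _mm512_testn_epi32_mask(a, b);
  /* always 0xFFFF */
}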
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_movehdup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_moveldup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
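/* Usage sketch (helper name illustrative): movehdup duplicates each odd lane
 * over its even neighbor, moveldup each even lane over its odd neighbor; for
 * interleaved (re, im) pairs this broadcasts the imaginary or real parts. */
static inline __m512 broadcast_imag(__m512 interleaved) {
  return _mm512_movehdup_ps(interleaved); /* lanes 1,1,3,3,...,15,15 */
}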
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B),
-                                     _mm_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B),
-                                     _mm_setzero_pd());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A)
-{
-  __m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W,
-                                                (__v4sf)_mm_setzero_ps(),
-                                                0, 4, 4, 4);
-
-  return (__m128) __builtin_ia32_loadss128_mask ((const __v4sf *) __A, src, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_load_ss (__mmask8 __U, const float* __A)
-{
-  return (__m128)__builtin_ia32_loadss128_mask ((const __v4sf *) __A,
-                                                (__v4sf) _mm_setzero_ps(),
-                                                __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A)
-{
-  __m128d src = (__v2df) __builtin_shufflevector((__v2df) __W,
-                                                 (__v2df)_mm_setzero_pd(),
-                                                 0, 2);
-
-  return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, src, __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_load_sd (__mmask8 __U, const double* __A)
-{
-  return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A,
-                                                  (__v2df) _mm_setzero_pd(),
-                                                  __U & 1);
-}
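/* Usage sketch (helper name illustrative): the masked scalar load keeps the
 * pass-through value in lane 0 when bit 0 of the mask is clear, and zeroes
 * the upper three lanes either way. */
static inline __m128 load_first_or_default(const float *p, float dflt, int ok) {
  return _mm_mask_load_ss(_mm_set_ss(dflt), (__mmask8)(ok & 1), p);
}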
-
-#define _mm512_shuffle_epi32(A, I) \
-  (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))
-
-#define _mm512_mask_shuffle_epi32(W, U, A, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_shuffle_epi32(U, A, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) _mm512_setzero_si512 (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) _mm512_setzero_pd(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) _mm512_setzero_si512(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) _mm512_setzero_ps(),
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) _mm512_setzero_si512(),
-              (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps(),
-               (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) __W,
-                (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) _mm512_setzero_si512(),
-                (__mmask16) __U);
-}
-
-#define _mm512_cvt_roundps_pd(A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtps_pd (__m256 __A)
-{
-  return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtpslo_pd (__m512 __A)
-{
-  return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
-{
-  return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
-}
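/* Usage sketch (helper name illustrative): cvtpslo widens the low eight
 * floats to doubles; in the masked form, lanes whose mask bit is clear keep
 * the pass-through value. */
static inline __m512d widen_low_or_keep(__m512d keep, __mmask8 m, __m512 v) {
  return _mm512_mask_cvtpslo_pd(keep, m, v);
}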
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) __W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) _mm512_setzero_pd ());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) __W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) _mm512_setzero_ps ());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
-            (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
-            (__mmask16) __U);
-}
-
-#define _mm_cvt_roundsd_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)_mm_undefined_ps(), \
-                                             (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)__W,
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)_mm_setzero_ps(),
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvtss_i32 _mm_cvtss_si32
-#define _mm_cvtsd_i32 _mm_cvtsd_si32
-#define _mm_cvti32_sd _mm_cvtsi32_sd
-#define _mm_cvti32_ss _mm_cvtsi32_ss
-#ifdef __x86_64__
-#define _mm_cvtss_i64 _mm_cvtss_si64
-#define _mm_cvtsd_i64 _mm_cvtsd_si64
-#define _mm_cvti64_sd _mm_cvtsi64_sd
-#define _mm_cvti64_ss _mm_cvtsi64_ss
-#endif
-
-#ifdef __x86_64__
-#define _mm_cvt_roundi64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                     (int)(R))
-
-#define _mm_cvt_roundsi64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                     (int)(R))
-#endif
-
-#define _mm_cvt_roundsi32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
-
-#define _mm_cvt_roundi32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsi64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                    (int)(R))
-
-#define _mm_cvt_roundi64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                    (int)(R))
-#endif
-
-#define _mm_cvt_roundss_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)_mm_undefined_pd(), \
-                                              (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)__W,
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)_mm_setzero_pd(),
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu32_sd (__m128d __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
-                                      (unsigned long long)(B), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-#define _mm_cvt_roundu32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
-                                     (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu32_ss (__m128 __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
-                                     (unsigned long long)(B), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512(__M,
-                                              (__v16si) _mm512_set1_epi32(__A),
-                                              (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512(__M,
-                                              (__v8di) _mm512_set1_epi64(__A),
-                                              (__v8di) __O);
-}
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
-    char __e58, char __e57, char __e56, char __e55, char __e54, char __e53,
-    char __e52, char __e51, char __e50, char __e49, char __e48, char __e47,
-    char __e46, char __e45, char __e44, char __e43, char __e42, char __e41,
-    char __e40, char __e39, char __e38, char __e37, char __e36, char __e35,
-    char __e34, char __e33, char __e32, char __e31, char __e30, char __e29,
-    char __e28, char __e27, char __e26, char __e25, char __e24, char __e23,
-    char __e22, char __e21, char __e20, char __e19, char __e18, char __e17,
-    char __e16, char __e15, char __e14, char __e13, char __e12, char __e11,
-    char __e10, char __e9, char __e8, char __e7, char __e6, char __e5,
-    char __e4, char __e3, char __e2, char __e1, char __e0) {
-
-  return __extension__ (__m512i)(__v64qi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31,
-     __e32, __e33, __e34, __e35, __e36, __e37, __e38, __e39,
-     __e40, __e41, __e42, __e43, __e44, __e45, __e46, __e47,
-     __e48, __e49, __e50, __e51, __e52, __e53, __e54, __e55,
-     __e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63};
-}
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
-    short __e27, short __e26, short __e25, short __e24, short __e23,
-    short __e22, short __e21, short __e20, short __e19, short __e18,
-    short __e17, short __e16, short __e15, short __e14, short __e13,
-    short __e12, short __e11, short __e10, short __e9, short __e8,
-    short __e7, short __e6, short __e5, short __e4, short __e3,
-    short __e2, short __e1, short __e0) {
-  return __extension__ (__m512i)(__v32hi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi32 (int __A, int __B, int __C, int __D,
-     int __E, int __F, int __G, int __H,
-     int __I, int __J, int __K, int __L,
-     int __M, int __N, int __O, int __P)
-{
-  return __extension__ (__m512i)(__v16si)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7,           \
-       e8,e9,e10,e11,e12,e13,e14,e15)          \
-  _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
-                   (e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi64 (long long __A, long long __B, long long __C,
-     long long __D, long long __E, long long __F,
-     long long __G, long long __H)
-{
-  return __extension__ (__m512i) (__v8di)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7)           \
-  _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_set_pd (double __A, double __B, double __C, double __D,
-        double __E, double __F, double __G, double __H)
-{
-  return __extension__ (__m512d)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7)              \
-  _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_set_ps (float __A, float __B, float __C, float __D,
-        float __E, float __F, float __G, float __H,
-        float __I, float __J, float __K, float __L,
-        float __M, float __N, float __O, float __P)
-{
-  return __extension__ (__m512)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
-  _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
-                (e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_abs_ps(__m512 __A)
-{
-  return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
-{
-  return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF), (__m512i)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_abs_pd(__m512d __A)
-{
-  return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
-{
-  return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), (__v8di)__A);
-}
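/* Usage sketch (helper name illustrative): these absolute-value helpers clear
 * the IEEE-754 sign bit with a bitwise AND instead of arithmetic, so -0.0
 * maps to +0.0 and NaN payloads are preserved. */
static inline __m512 abs_demo(void) {
  return _mm512_abs_ps(_mm512_set1_ps(-3.5f)); /* every lane becomes 3.5f */
}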
-
-/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars as
- * outputs. This class of vector operation forms the basis of many scientific
- * computations. In vector-reduction arithmetic, the result of the operation is
- * independent of the order of the input elements of V.
-
- * We use a bisection method: at each step, the vector from the previous step
- * is partitioned in half, and the operation is performed on its two halves.
- * This takes log2(n) steps, where n is the number of elements in the vector.
- */
-
-#define _mm512_mask_reduce_operator(op) \
-  __v4du __t1 = (__v4du)_mm512_extracti64x4_epi64(__W, 0); \
-  __v4du __t2 = (__v4du)_mm512_extracti64x4_epi64(__W, 1); \
-  __m256i __t3 = (__m256i)(__t1 op __t2); \
-  __v2du __t4 = (__v2du)_mm256_extracti128_si256(__t3, 0); \
-  __v2du __t5 = (__v2du)_mm256_extracti128_si256(__t3, 1); \
-  __v2du __t6 = __t4 op __t5; \
-  __v2du __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
-  __v2du __t8 = __t6 op __t7; \
-  return __t8[0]
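/* The bisection step in scalar form (illustrative sketch, not part of the
 * header): combine element i with element i + width/2 and halve the active
 * width each pass; eight elements take log2(8) = 3 passes. */
static inline long long reduce_add_sketch(long long v[8]) {
  for (int w = 8; w > 1; w /= 2)
    for (int i = 0; i < w / 2; ++i)
      v[i] += v[i + w / 2];
  return v[0];
}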
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(|);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  _mm512_mask_reduce_operator(|);
-}
-#undef _mm512_mask_reduce_operator
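/* Usage sketch (helper name illustrative): the masked reductions first
 * replace inactive lanes with the operation's identity (0 for add/or, 1 for
 * mul, all-ones for and), so reducing the full vector reduces only the
 * active lanes. */
static inline long long sum_low_half_epi64(__m512i v) {
  return _mm512_mask_reduce_add_epi64(0x0F, v); /* adds lanes 0..3 only */
}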
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256d __t1 = _mm512_extractf64x4_pd(__W, 0); \
-  __m256d __t2 = _mm512_extractf64x4_pd(__W, 1); \
-  __m256d __t3 = __t1 op __t2; \
-  __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
-  __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
-  __m128d __t6 = __t4 op __t5; \
-  __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
-  __m128d __t8 = __t6 op __t7; \
-  return __t8[0]
-
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_maskz_mov_pd(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __v8su __t1 = (__v8su)_mm512_extracti64x4_epi64(__W, 0); \
-  __v8su __t2 = (__v8su)_mm512_extracti64x4_epi64(__W, 1); \
-  __m256i __t3 = (__m256i)(__t1 op __t2); \
-  __v4su __t4 = (__v4su)_mm256_extracti128_si256(__t3, 0); \
-  __v4su __t5 = (__v4su)_mm256_extracti128_si256(__t3, 1); \
-  __v4su __t6 = __t4 op __t5; \
-  __v4su __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
-  __v4su __t8 = __t6 op __t7; \
-  __v4su __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
-  __v4su __t10 = __t8 op __t9; \
-  return __t10[0]
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_and_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_or_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(|);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  _mm512_mask_reduce_operator(|);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 0); \
-  __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 1); \
-  __m256 __t3 = __t1 op __t2; \
-  __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
-  __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
-  __m128 __t6 = __t4 op __t5; \
-  __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
-  __m128 __t8 = __t6 op __t7; \
-  __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
-  __m128 __t10 = __t8 op __t9; \
-  return __t10[0]
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_ps(__m512 __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_ps(__m512 __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_maskz_mov_ps(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m512i __t1 = (__m512i)__builtin_shufflevector((__v8di)__V, (__v8di)__V, 4, 5, 6, 7, 0, 1, 2, 3); \
-  __m512i __t2 = _mm512_##op(__V, __t1); \
-  __m512i __t3 = (__m512i)__builtin_shufflevector((__v8di)__t2, (__v8di)__t2, 2, 3, 0, 1, 6, 7, 4, 5); \
-  __m512i __t4 = _mm512_##op(__t2, __t3); \
-  __m512i __t5 = (__m512i)__builtin_shufflevector((__v8di)__t4, (__v8di)__t4, 1, 0, 3, 2, 5, 4, 7, 6); \
-  __v8di __t6 = (__v8di)_mm512_##op(__t4, __t5); \
-  return __t6[0]
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi64(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu64(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epu64);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi64(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu64(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epu64);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  _mm512_mask_reduce_operator(max_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi64(__M, __V);
-  _mm512_mask_reduce_operator(max_epu64);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  _mm512_mask_reduce_operator(min_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  _mm512_mask_reduce_operator(min_epu64);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256i __t1 = _mm512_extracti64x4_epi64(__V, 0); \
-  __m256i __t2 = _mm512_extracti64x4_epi64(__V, 1); \
-  __m256i __t3 = _mm256_##op(__t1, __t2); \
-  __m128i __t4 = _mm256_extracti128_si256(__t3, 0); \
-  __m128i __t5 = _mm256_extracti128_si256(__t3, 1); \
-  __m128i __t6 = _mm_##op(__t4, __t5); \
-  __m128i __t7 = (__m128i)__builtin_shufflevector((__v4si)__t6, (__v4si)__t6, 2, 3, 0, 1); \
-  __m128i __t8 = _mm_##op(__t6, __t7); \
-  __m128i __t9 = (__m128i)__builtin_shufflevector((__v4si)__t8, (__v4si)__t8, 1, 0, 3, 2); \
-  __v4si __t10 = (__v4si)_mm_##op(__t8, __t9); \
-  return __t10[0]
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi32(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu32(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epu32);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi32(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu32(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epu32);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  _mm512_mask_reduce_operator(max_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi32(__M, __V);
-  _mm512_mask_reduce_operator(max_epu32);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  _mm512_mask_reduce_operator(min_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  _mm512_mask_reduce_operator(min_epu32);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256d __t1 = _mm512_extractf64x4_pd(__V, 0); \
-  __m256d __t2 = _mm512_extractf64x4_pd(__V, 1); \
-  __m256d __t3 = _mm256_##op(__t1, __t2); \
-  __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
-  __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
-  __m128d __t6 = _mm_##op(__t4, __t5); \
-  __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
-  __m128d __t8 = _mm_##op(__t6, __t7); \
-  return __t8[0]
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_pd(__m512d __V) {
-  _mm512_mask_reduce_operator(max_pd);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_pd(__m512d __V) {
-  _mm512_mask_reduce_operator(min_pd);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V);
-  _mm512_mask_reduce_operator(max_pd);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V);
-  _mm512_mask_reduce_operator(min_pd);
-}
-#undef _mm512_mask_reduce_operator
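/* Usage sketch (helper name illustrative): for floating-point min/max the
 * identity is the opposite infinity, so a fully masked-off input reduces to
 * -inf for max and +inf for min. */
static inline double max_active_pd(__mmask8 m, __m512d v) {
  return _mm512_mask_reduce_max_pd(m, v); /* -inf when m == 0 */
}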
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 0); \
-  __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 1); \
-  __m256 __t3 = _mm256_##op(__t1, __t2); \
-  __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
-  __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
-  __m128 __t6 = _mm_##op(__t4, __t5); \
-  __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
-  __m128 __t8 = _mm_##op(__t6, __t7); \
-  __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
-  __m128 __t10 = _mm_##op(__t8, __t9); \
-  return __t10[0]
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_ps(__m512 __V) {
-  _mm512_mask_reduce_operator(max_ps);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_ps(__m512 __V) {
-  _mm512_mask_reduce_operator(min_ps);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V);
-  _mm512_mask_reduce_operator(max_ps);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V);
-  _mm512_mask_reduce_operator(min_ps);
-}
-#undef _mm512_mask_reduce_operator
-
-/// Moves the least significant 32 bits of a vector of [16 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __A
-///    A vector of [16 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_cvtsi512_si32(__m512i __A) {
-  __v16si __b = (__v16si)__A;
-  return __b[0];
-}
-
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __AVX512FINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/bmiintrin.h b/linux-x86/lib64/clang/11.0.1/include/bmiintrin.h
deleted file mode 100644
index 841bd84..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/bmiintrin.h
+++ /dev/null
@@ -1,381 +0,0 @@
-/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
-#endif
-
-#ifndef __BMIINTRIN_H
-#define __BMIINTRIN_H
-
-/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
-   instruction behaves as BSF on non-BMI targets, there is code that expects
-   to use it as a potentially faster version of BSF. */
-#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 16-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 16-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned short __RELAXED_FN_ATTRS
-__tzcnt_u16(unsigned short __X)
-{
-  return __builtin_ia32_tzcnt_u16(__X);
-}
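/* Usage sketch (illustrative; assumes <x86intrin.h>): unlike BSF, TZCNT is
 * defined for a zero input and returns the operand width, so no zero check
 * is needed. */
static inline unsigned count_trailing16(unsigned short x) {
  return __tzcnt_u16(x); /* 16 when x == 0 */
}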
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 32-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned int __RELAXED_FN_ATTRS
-__tzcnt_u32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns A 32-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ int __RELAXED_FN_ATTRS
-_mm_tzcnt_32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
-
-#ifdef __x86_64__
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 64-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned long long __RELAXED_FN_ATTRS
-__tzcnt_u64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns A 64-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ long long __RELAXED_FN_ATTRS
-_mm_tzcnt_64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
-
-#endif /* __x86_64__ */
-
-#undef __RELAXED_FN_ATTRS
-
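An illustrative sketch (not part of the deleted header) of the tzcnt
intrinsics above; it assumes an x86 target where <x86intrin.h> is available.
On BMI hardware TZCNT is defined for a zero input and returns the operand
width, unlike plain BSF.

#include <stdio.h>
#include <x86intrin.h>

int main(void) {
  unsigned int x = 0x28;  /* 0b101000: lowest set bit is bit 3 */
  printf("tzcnt(0x%x) = %u\n", x, __tzcnt_u32(x));  /* prints 3 */
  printf("tzcnt(0)    = %u\n", __tzcnt_u32(0u));    /* 32 on BMI hardware */
  return 0;
}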
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
-
-#define _andn_u32(a, b)   (__andn_u32((a), (b)))
-
-/* _bextr_u32 != __bextr_u32 */
-#define _blsi_u32(a)      (__blsi_u32((a)))
-
-#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
-
-#define _blsr_u32(a)      (__blsr_u32((a)))
-
-/// Performs a bitwise AND of the second operand with the one's
-///    complement of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
-///
-/// \param __X
-///    An unsigned integer containing one of the operands.
-/// \param __Y
-///    An unsigned integer containing one of the operands.
-/// \returns An unsigned integer containing the bitwise AND of the second
-///    operand with the one's complement of the first operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__andn_u32(unsigned int __X, unsigned int __Y)
-{
-  return ~__X & __Y;
-}
-
-/* AMD-specified, double-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
-///    specify the index of the least significant bit. Bits [15:8] specify the
-///    number of bits to be extracted.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see _bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__bextr_u32(unsigned int __X, unsigned int __Y)
-{
-  return __builtin_ia32_bextr_u32(__X, __Y);
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify the index of the least significant
-///    bit for the bits to be extracted. Bits [7:0] specify the index.
-/// \param __Z
-///    An unsigned integer used to specify the number of bits to be extracted.
-///    Bits [7:0] specify the number of bits.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
-{
-  return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
-}
-
-/// Clears all bits in the source except for the least significant bit
-///    containing a value of 1 and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be cleared.
-/// \returns An unsigned integer containing the result of clearing the bits from
-///    the source operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsi_u32(unsigned int __X)
-{
-  return __X & -__X;
-}
-
-/// Creates a mask whose bits are set to 1, using bit 0 up to and
-///    including the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
-///
-/// \param __X
-///    An unsigned integer used to create the mask.
-/// \returns An unsigned integer containing the newly created mask.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsmsk_u32(unsigned int __X)
-{
-  return __X ^ (__X - 1);
-}
-
-/// Clears the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer containing the operand to be cleared.
-/// \returns An unsigned integer containing the result of clearing the source
-///    operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsr_u32(unsigned int __X)
-{
-  return __X & (__X - 1);
-}
-
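A minimal sketch of the 32-bit BMI operations above, assuming compilation
with BMI enabled (e.g. -mbmi); the expected values are worked out from the
bit identities quoted in the doc comments.

#include <stdio.h>
#include <x86intrin.h>

int main(void) {
  unsigned int x = 0xB4;                          /* 0b10110100 */
  printf("andn   = 0x%X\n", __andn_u32(x, 0xFF)); /* ~x & 0xFF   -> 0x4B */
  printf("blsi   = 0x%X\n", __blsi_u32(x));       /* x & -x      -> 0x04 */
  printf("blsmsk = 0x%X\n", __blsmsk_u32(x));     /* x ^ (x - 1) -> 0x07 */
  printf("blsr   = 0x%X\n", __blsr_u32(x));       /* x & (x - 1) -> 0xB0 */
  /* _bextr_u32 takes start and length separately; __bextr_u32 packs them
     into one control word as (start | length << 8). Extracting 4 bits
     starting at bit 2 yields (x >> 2) & 0xF = 0xD. */
  printf("bextr  = 0x%X\n", _bextr_u32(x, 2, 4));
  return 0;
}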
-#ifdef __x86_64__
-
-#define _andn_u64(a, b)   (__andn_u64((a), (b)))
-
-/* _bextr_u64 != __bextr_u64 */
-#define _blsi_u64(a)      (__blsi_u64((a)))
-
-#define _blsmsk_u64(a)    (__blsmsk_u64((a)))
-
-#define _blsr_u64(a)      (__blsr_u64((a)))
-
-/// Performs a bitwise AND of the second operand with the one's
-///    complement of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer containing one of the operands.
-/// \param __Y
-///    An unsigned 64-bit integer containing one of the operands.
-/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
-///    operand with the one's complement of the first operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__andn_u64 (unsigned long long __X, unsigned long long __Y)
-{
-  return ~__X & __Y;
-}
-
-/* AMD-specified, double-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
-///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
-///    the number of bits to be extracted.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see _bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__bextr_u64(unsigned long long __X, unsigned long long __Y)
-{
-  return __builtin_ia32_bextr_u64(__X, __Y);
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify the index of the least significant
-///    bit for the bits to be extracted. Bits [7:0] specify the index.
-/// \param __Z
-///    An unsigned integer used to specify the number of bits to be extracted.
-///    Bits [7:0] specify the number of bits.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
-{
-  return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
-}
-
-/// Clears all bits in the source except for the least significant bit
-///    containing a value of 1 and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be cleared.
-/// \returns An unsigned 64-bit integer containing the result of clearing the
-///    bits from the source operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsi_u64(unsigned long long __X)
-{
-  return __X & -__X;
-}
-
-/// Creates a mask whose bits are set to 1, using bit 0 up to and
-///    including the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer used to create the mask.
-/// \returns An unsigned 64-bit integer containing the newly created mask.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsmsk_u64(unsigned long long __X)
-{
-  return __X ^ (__X - 1);
-}
-
-/// Clears the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer containing the operand to be cleared.
-/// \returns An unsigned 64-bit integer containing the result of clearing the
-///    source operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsr_u64(unsigned long long __X)
-{
-  return __X & (__X - 1);
-}
-
-#endif /* __x86_64__ */
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
-
-#endif /* __BMIINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/cpuid.h b/linux-x86/lib64/clang/11.0.1/include/cpuid.h
deleted file mode 100644
index 4ddd648..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/cpuid.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*===---- cpuid.h - X86 cpu model detection --------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !(__x86_64__ || __i386__)
-#error this header is for x86 only
-#endif
-
-/* Responses to an identification request with %eax 0 */
-/* AMD:     "AuthenticAMD" */
-#define signature_AMD_ebx 0x68747541
-#define signature_AMD_edx 0x69746e65
-#define signature_AMD_ecx 0x444d4163
-/* CENTAUR: "CentaurHauls" */
-#define signature_CENTAUR_ebx 0x746e6543
-#define signature_CENTAUR_edx 0x48727561
-#define signature_CENTAUR_ecx 0x736c7561
-/* CYRIX:   "CyrixInstead" */
-#define signature_CYRIX_ebx 0x69727943
-#define signature_CYRIX_edx 0x736e4978
-#define signature_CYRIX_ecx 0x64616574
-/* INTEL:   "GenuineIntel" */
-#define signature_INTEL_ebx 0x756e6547
-#define signature_INTEL_edx 0x49656e69
-#define signature_INTEL_ecx 0x6c65746e
-/* TM1:     "TransmetaCPU" */
-#define signature_TM1_ebx 0x6e617254
-#define signature_TM1_edx 0x74656d73
-#define signature_TM1_ecx 0x55504361
-/* TM2:     "GenuineTMx86" */
-#define signature_TM2_ebx 0x756e6547
-#define signature_TM2_edx 0x54656e69
-#define signature_TM2_ecx 0x3638784d
-/* NSC:     "Geode by NSC" */
-#define signature_NSC_ebx 0x646f6547
-#define signature_NSC_edx 0x79622065
-#define signature_NSC_ecx 0x43534e20
-/* NEXGEN:  "NexGenDriven" */
-#define signature_NEXGEN_ebx 0x4778654e
-#define signature_NEXGEN_edx 0x72446e65
-#define signature_NEXGEN_ecx 0x6e657669
-/* RISE:    "RiseRiseRise" */
-#define signature_RISE_ebx 0x65736952
-#define signature_RISE_edx 0x65736952
-#define signature_RISE_ecx 0x65736952
-/* SIS:     "SiS SiS SiS " */
-#define signature_SIS_ebx 0x20536953
-#define signature_SIS_edx 0x20536953
-#define signature_SIS_ecx 0x20536953
-/* UMC:     "UMC UMC UMC " */
-#define signature_UMC_ebx 0x20434d55
-#define signature_UMC_edx 0x20434d55
-#define signature_UMC_ecx 0x20434d55
-/* VIA:     "VIA VIA VIA " */
-#define signature_VIA_ebx 0x20414956
-#define signature_VIA_edx 0x20414956
-#define signature_VIA_ecx 0x20414956
-/* VORTEX:  "Vortex86 SoC" */
-#define signature_VORTEX_ebx 0x74726f56
-#define signature_VORTEX_edx 0x36387865
-#define signature_VORTEX_ecx 0x436f5320
-
-/* Features in %ecx for leaf 1 */
-#define bit_SSE3        0x00000001
-#define bit_PCLMULQDQ   0x00000002
-#define bit_PCLMUL      bit_PCLMULQDQ   /* for gcc compat */
-#define bit_DTES64      0x00000004
-#define bit_MONITOR     0x00000008
-#define bit_DSCPL       0x00000010
-#define bit_VMX         0x00000020
-#define bit_SMX         0x00000040
-#define bit_EIST        0x00000080
-#define bit_TM2         0x00000100
-#define bit_SSSE3       0x00000200
-#define bit_CNXTID      0x00000400
-#define bit_FMA         0x00001000
-#define bit_CMPXCHG16B  0x00002000
-#define bit_xTPR        0x00004000
-#define bit_PDCM        0x00008000
-#define bit_PCID        0x00020000
-#define bit_DCA         0x00040000
-#define bit_SSE41       0x00080000
-#define bit_SSE4_1      bit_SSE41       /* for gcc compat */
-#define bit_SSE42       0x00100000
-#define bit_SSE4_2      bit_SSE42       /* for gcc compat */
-#define bit_x2APIC      0x00200000
-#define bit_MOVBE       0x00400000
-#define bit_POPCNT      0x00800000
-#define bit_TSCDeadline 0x01000000
-#define bit_AESNI       0x02000000
-#define bit_AES         bit_AESNI       /* for gcc compat */
-#define bit_XSAVE       0x04000000
-#define bit_OSXSAVE     0x08000000
-#define bit_AVX         0x10000000
-#define bit_F16C        0x20000000
-#define bit_RDRND       0x40000000
-
-/* Features in %edx for leaf 1 */
-#define bit_FPU         0x00000001
-#define bit_VME         0x00000002
-#define bit_DE          0x00000004
-#define bit_PSE         0x00000008
-#define bit_TSC         0x00000010
-#define bit_MSR         0x00000020
-#define bit_PAE         0x00000040
-#define bit_MCE         0x00000080
-#define bit_CX8         0x00000100
-#define bit_CMPXCHG8B   bit_CX8         /* for gcc compat */
-#define bit_APIC        0x00000200
-#define bit_SEP         0x00000800
-#define bit_MTRR        0x00001000
-#define bit_PGE         0x00002000
-#define bit_MCA         0x00004000
-#define bit_CMOV        0x00008000
-#define bit_PAT         0x00010000
-#define bit_PSE36       0x00020000
-#define bit_PSN         0x00040000
-#define bit_CLFSH       0x00080000
-#define bit_DS          0x00200000
-#define bit_ACPI        0x00400000
-#define bit_MMX         0x00800000
-#define bit_FXSR        0x01000000
-#define bit_FXSAVE      bit_FXSR        /* for gcc compat */
-#define bit_SSE         0x02000000
-#define bit_SSE2        0x04000000
-#define bit_SS          0x08000000
-#define bit_HTT         0x10000000
-#define bit_TM          0x20000000
-#define bit_PBE         0x80000000
-
-/* Features in %ebx for leaf 7 sub-leaf 0 */
-#define bit_FSGSBASE    0x00000001
-#define bit_SGX         0x00000004
-#define bit_BMI         0x00000008
-#define bit_HLE         0x00000010
-#define bit_AVX2        0x00000020
-#define bit_SMEP        0x00000080
-#define bit_BMI2        0x00000100
-#define bit_ENH_MOVSB   0x00000200
-#define bit_INVPCID     0x00000400
-#define bit_RTM         0x00000800
-#define bit_MPX         0x00004000
-#define bit_AVX512F     0x00010000
-#define bit_AVX512DQ    0x00020000
-#define bit_RDSEED      0x00040000
-#define bit_ADX         0x00080000
-#define bit_AVX512IFMA  0x00200000
-#define bit_CLFLUSHOPT  0x00800000
-#define bit_CLWB        0x01000000
-#define bit_AVX512PF    0x04000000
-#define bit_AVX512ER    0x08000000
-#define bit_AVX512CD    0x10000000
-#define bit_SHA         0x20000000
-#define bit_AVX512BW    0x40000000
-#define bit_AVX512VL    0x80000000
-
-/* Features in %ecx for leaf 7 sub-leaf 0 */
-#define bit_PREFTCHWT1       0x00000001
-#define bit_AVX512VBMI       0x00000002
-#define bit_PKU              0x00000004
-#define bit_OSPKE            0x00000010
-#define bit_WAITPKG          0x00000020
-#define bit_AVX512VBMI2      0x00000040
-#define bit_SHSTK            0x00000080
-#define bit_GFNI             0x00000100
-#define bit_VAES             0x00000200
-#define bit_VPCLMULQDQ       0x00000400
-#define bit_AVX512VNNI       0x00000800
-#define bit_AVX512BITALG     0x00001000
-#define bit_AVX512VPOPCNTDQ  0x00004000
-#define bit_RDPID            0x00400000
-#define bit_CLDEMOTE         0x02000000
-#define bit_MOVDIRI          0x08000000
-#define bit_MOVDIR64B        0x10000000
-#define bit_ENQCMD           0x20000000
-
-/* Features in %edx for leaf 7 sub-leaf 0 */
-#define bit_AVX5124VNNIW  0x00000004
-#define bit_AVX5124FMAPS  0x00000008
-#define bit_PCONFIG       0x00040000
-#define bit_IBT           0x00100000
-
-/* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVX512BF16    0x00000020
-
-/* Features in %eax for leaf 13 sub-leaf 1 */
-#define bit_XSAVEOPT    0x00000001
-#define bit_XSAVEC      0x00000002
-#define bit_XSAVES      0x00000008
-
-/* Features in %eax for leaf 0x14 sub-leaf 0 */
-#define bit_PTWRITE     0x00000010
-
-/* Features in %ecx for leaf 0x80000001 */
-#define bit_LAHF_LM     0x00000001
-#define bit_ABM         0x00000020
-#define bit_LZCNT       bit_ABM        /* for gcc compat */
-#define bit_SSE4a       0x00000040
-#define bit_PRFCHW      0x00000100
-#define bit_XOP         0x00000800
-#define bit_LWP         0x00008000
-#define bit_FMA4        0x00010000
-#define bit_TBM         0x00200000
-#define bit_MWAITX      0x20000000
-
-/* Features in %edx for leaf 0x80000001 */
-#define bit_MMXEXT      0x00400000
-#define bit_LM          0x20000000
-#define bit_3DNOWP      0x40000000
-#define bit_3DNOW       0x80000000
-
-/* Features in %ebx for leaf 0x80000008 */
-#define bit_CLZERO      0x00000001
-#define bit_WBNOINVD    0x00000200
-
-
-#if __i386__
-#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
-    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
-                  : "0"(__leaf))
-
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
-    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
-                  : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
-    __asm("  xchgq  %%rbx,%q1\n" \
-          "  cpuid\n" \
-          "  xchgq  %%rbx,%q1" \
-        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
-        : "0"(__leaf))
-
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
-    __asm("  xchgq  %%rbx,%q1\n" \
-          "  cpuid\n" \
-          "  xchgq  %%rbx,%q1" \
-        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
-        : "0"(__leaf), "2"(__count))
-#endif
-
-static __inline int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig)
-{
-    unsigned int __eax, __ebx, __ecx, __edx;
-#if __i386__
-    int __cpuid_supported;
-
-    __asm("  pushfl\n"
-          "  popl   %%eax\n"
-          "  movl   %%eax,%%ecx\n"
-          "  xorl   $0x00200000,%%eax\n"
-          "  pushl  %%eax\n"
-          "  popfl\n"
-          "  pushfl\n"
-          "  popl   %%eax\n"
-          "  movl   $0,%0\n"
-          "  cmpl   %%eax,%%ecx\n"
-          "  je     1f\n"
-          "  movl   $1,%0\n"
-          "1:"
-        : "=r" (__cpuid_supported) : : "eax", "ecx");
-    if (!__cpuid_supported)
-        return 0;
-#endif
-
-    __cpuid(__leaf, __eax, __ebx, __ecx, __edx);
-    if (__sig)
-        *__sig = __ebx;
-    return __eax;
-}
-
-static __inline int __get_cpuid (unsigned int __leaf, unsigned int *__eax,
-                                 unsigned int *__ebx, unsigned int *__ecx,
-                                 unsigned int *__edx)
-{
-    unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);
-
-    if (__max_leaf == 0 || __max_leaf < __leaf)
-        return 0;
-
-    __cpuid(__leaf, *__eax, *__ebx, *__ecx, *__edx);
-    return 1;
-}
-
-static __inline int __get_cpuid_count (unsigned int __leaf,
-                                       unsigned int __subleaf,
-                                       unsigned int *__eax, unsigned int *__ebx,
-                                       unsigned int *__ecx, unsigned int *__edx)
-{
-    unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);
-
-    if (__max_leaf == 0 || __max_leaf < __leaf)
-        return 0;
-
-    __cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);
-    return 1;
-}
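A short sketch of how __get_cpuid and the feature/signature constants above
combine for runtime detection (illustrative only; bit_SSE4_2 and
signature_INTEL_ebx are defined earlier in this header).

#include <stdio.h>
#include <cpuid.h>

int main(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 1;                              /* leaf 1 not supported */
  printf("SSE4.2: %s\n", (ecx & bit_SSE4_2) ? "yes" : "no");

  unsigned int sig = 0;
  unsigned int max_leaf = __get_cpuid_max(0, &sig);  /* sig gets %ebx */
  printf("max leaf %u, GenuineIntel: %s\n", max_leaf,
         sig == signature_INTEL_ebx ? "yes" : "no");
  return 0;
}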
diff --git a/linux-x86/lib64/clang/11.0.1/include/emmintrin.h b/linux-x86/lib64/clang/11.0.1/include/emmintrin.h
deleted file mode 100644
index 993c688..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/emmintrin.h
+++ /dev/null
@@ -1,4981 +0,0 @@
-/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __EMMINTRIN_H
-#define __EMMINTRIN_H
-
-#include <xmmintrin.h>
-
-typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
-
-/* Type defines.  */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
-typedef short __v8hi __attribute__((__vector_size__(16)));
-typedef char __v16qi __attribute__((__vector_size__(16)));
-
-/* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
-typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
-typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
-
-/* We need an explicitly signed variant for char. Note that this shouldn't
- * appear in the interface though. */
-typedef signed char __v16qs __attribute__((__vector_size__(16)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
-
-/// Adds lower double-precision values in both operands and returns the
-///    sum in the lower 64 bits of the result. The upper 64 bits of the result
-///    are copied from the upper double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSD / ADDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    sum of the lower 64 bits of both operands. The upper 64 bits are copied
-///    from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
-  __a[0] += __b[0];
-  return __a;
-}
-
-/// Adds two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPD / ADDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the sums of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a + (__v2df)__b);
-}
-
-/// Subtracts the lower double-precision value of the second operand
-///    from the lower double-precision value of the first operand and returns
-///    the difference in the lower 64 bits of the result. The upper 64 bits of
-///    the result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBSD / SUBSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    difference of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
-  __a[0] -= __b[0];
-  return __a;
-}
-
-/// Subtracts two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPD / SUBPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] containing the differences between
-///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a - (__v2df)__b);
-}
-
-/// Multiplies lower double-precision values in both operands and returns
-///    the product in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULSD / MULSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    product of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
-  __a[0] *= __b[0];
-  return __a;
-}
-
-/// Multiplies two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPD / MULPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the products of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a * (__v2df)__b);
-}
-
-/// Divides the lower double-precision value of the first operand by the
-///    lower double-precision value of the second operand and returns the
-///    quotient in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVSD / DIVSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    quotient of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
-  __a[0] /= __b[0];
-  return __a;
-}
-
-/// Performs an element-by-element division of two 128-bit vectors of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPD / DIVPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] containing the quotients of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a / (__v2df)__b);
-}
-
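An illustrative sketch contrasting the packed (_pd) and scalar (_sd)
arithmetic forms above: _pd operates on both lanes, while _sd touches only
lane 0 and passes lane 1 of the first operand through. (_mm_set_pd and
_mm_storeu_pd are SSE2 intrinsics defined elsewhere in this header.)

#include <stdio.h>
#include <emmintrin.h>

int main(void) {
  __m128d a = _mm_set_pd(10.0, 1.0);   /* lane 0 = 1.0, lane 1 = 10.0 */
  __m128d b = _mm_set_pd(20.0, 2.0);
  double out[2];

  _mm_storeu_pd(out, _mm_add_pd(a, b));
  printf("add_pd: %g %g\n", out[0], out[1]);  /* 3 30 */

  _mm_storeu_pd(out, _mm_add_sd(a, b));
  printf("add_sd: %g %g\n", out[0], out[1]);  /* 3 10: upper lane from a */
  return 0;
}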
-/// Calculates the square root of the lower double-precision value of
-///    the second operand and returns it in the lower 64 bits of the result.
-///    The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTSD / SQRTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    upper 64 bits of this operand are copied to the upper 64 bits of the
-///    result.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    square root is calculated using the lower 64 bits of this operand.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    square root of the lower 64 bits of operand \a __b, and whose upper 64
-///    bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Calculates the square root of each of the two values stored in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPD / SQRTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [2 x double] containing the square roots of the
-///    values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
-  return __builtin_ia32_sqrtpd((__v2df)__a);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the lesser of the pair of values in the lower 64-bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    minimum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the lesser of each pair of
-///    values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the minimum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the greater of the pair of values in the lower 64-bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    maximum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the greater of each pair
-///    of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the maximum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
-}
-
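As a usage note, a common composition of the packed min/max above is a
branchless per-lane clamp (an illustrative sketch, not from the header):

#include <stdio.h>
#include <emmintrin.h>

static __m128d clamp_pd(__m128d v, __m128d lo, __m128d hi) {
  return _mm_min_pd(_mm_max_pd(v, lo), hi);  /* clamp both lanes to [lo, hi] */
}

int main(void) {
  double out[2];
  _mm_storeu_pd(out, clamp_pd(_mm_set_pd(7.5, -2.0),
                              _mm_set1_pd(0.0), _mm_set1_pd(5.0)));
  printf("%g %g\n", out[0], out[1]);  /* 0 5 */
  return 0;
}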
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using
-///    the one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the left source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the right source operand.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values in the second operand and the one's complement of the first
-///    operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)(~(__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise XOR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a ^ (__v2du)__b);
-}
-
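The bitwise _pd operations above are mostly used for sign-bit manipulation;
a minimal sketch (illustrative): clearing the sign bit with _mm_andnot_pd
implements a two-lane fabs.

#include <stdio.h>
#include <emmintrin.h>

int main(void) {
  __m128d sign = _mm_set1_pd(-0.0);            /* only the sign bit set */
  __m128d v    = _mm_set_pd(3.0, -1.5);
  double out[2];
  _mm_storeu_pd(out, _mm_andnot_pd(sign, v));  /* ~sign & v clears sign bits */
  printf("%g %g\n", out[0], out[1]);           /* 1.5 3 */
  return 0;
}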
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] for equality. Each comparison yields 0x0
-///    for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQPD / CMPEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than those in the second operand. Each comparison
-///    yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are ordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "ordered" with respect to each
-///    other if neither value is a NaN. Each comparison yields 0x0 for false,
-///    0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDPD / CMPORDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "unordered" with respect to each
-///    other if one or both values are NaN. Each comparison yields 0x0 for
-///    false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDPD / CMPUNORDPD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unequal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQPD / CMPNEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
-}
-
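The all-ones/all-zeros lane masks produced by the packed comparisons above
enable branchless selection; a sketch (illustrative) building a per-lane
minimum from _mm_cmplt_pd and the bitwise operations earlier in the header:

#include <stdio.h>
#include <emmintrin.h>

/* select a[i] where a[i] < b[i], else b[i]; i.e. a per-lane minimum */
static __m128d min_via_mask(__m128d a, __m128d b) {
  __m128d m = _mm_cmplt_pd(a, b);  /* per-lane mask: all-ones where a < b */
  return _mm_or_pd(_mm_and_pd(m, a), _mm_andnot_pd(m, b));
}

int main(void) {
  double out[2];
  _mm_storeu_pd(out, min_via_mask(_mm_set_pd(1.0, 4.0),
                                  _mm_set_pd(2.0, 3.0)));
  printf("%g %g\n", out[0], out[1]);  /* 3 1 */
  return 0;
}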
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQSD / CMPEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "ordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "ordered" with respect to each other if
-///    neither value is a NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDSD / CMPORDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "unordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "unordered" with respect to each other if
-///    one or both values are NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDSD / CMPUNORDSD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQSD / CMPNEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    result. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    result. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    result. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
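-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): the scalar compares above return an
-   all-ones or all-zeros 64-bit mask in the low lane, which can be
-   reinterpreted as an integer for branching. */
-static __inline__ int __example_sd_compare_is_true(__m128d __a, __m128d __b)
-{
-  __m128d __m = _mm_cmpnlt_sd(__a, __b); /* low lane: 0x0 or all-ones mask */
-  return ((__v2di)__m)[0] != 0;          /* 1 when __a[0] is not less than __b[0] */
-}
-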
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality. The
-///    comparison yields 0 for false, 1 for true.
-///
-///    If either of the two lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true.  If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
-}
-
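-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): with a NaN operand the ordered and
-   unordered equality predicates both return 0, while the inequality
-   predicates return 1. */
-static __inline__ int __example_ucomi_nan(void)
-{
-  __m128d __n = __extension__ (__m128d){ __builtin_nan(""), 0.0 };
-  return _mm_ucomieq_sd(__n, __n); /* 0; _mm_ucomineq_sd(__n, __n) gives 1 */
-}
-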
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two single-precision floating-point
-///    values, returned in the lower 64 bits of a 128-bit vector of [4 x float].
-///    The upper 64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2PS / CVTPD2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2ps((__v2df)__a);
-}
-
-/// Converts the lower two single-precision floating-point elements of a
-///    128-bit vector of [4 x float] into two double-precision floating-point
-///    values, returned in a 128-bit vector of [2 x double]. The upper two
-///    elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PD / CVTPS2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower two single-precision
-///    floating-point elements are converted to double-precision values. The
-///    upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
-}
-
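-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): narrowing with _mm_cvtpd_ps rounds each
-   double to single precision, so this round trip can lose low-order bits. */
-static __inline__ __m128d __example_narrow_then_widen(__m128d __v)
-{
-  return _mm_cvtps_pd(_mm_cvtpd_ps(__v));
-}
-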
-/// Converts the lower two integer elements of a 128-bit vector of
-///    [4 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-///    The upper two elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PD / CVTDQ2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. The lower two integer elements are
-///    converted to double-precision values.
-///
-///    The upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2DQ / CVTPD2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a 128-bit vector of [2 x double]
-///    into a 32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si((__v2df)__a);
-}
-
-/// Converts the lower double-precision floating-point element of a
-///    128-bit vector of [2 x double], in the second parameter, into a
-///    single-precision floating-point value, returned in the lower 32 bits of a
-///    128-bit vector of [4 x float]. The upper 96 bits of the result vector are
-///    copied from the upper 96 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SS / CVTSD2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are
-///    copied to the upper 96 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
-///    converted value from the second parameter. The upper 96 bits are copied
-///    from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
-  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
-}
-
-/// Converts a 32-bit signed integer value, in the second parameter, into
-///    a double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 32-bit signed integer containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the lower single-precision floating-point element of a
-///    128-bit vector of [4 x float], in the second parameter, into a
-///    double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SD / CVTSS2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower single-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPD2DQ / CVTTPD2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
-  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a [2 x double] vector into a 32-bit
-///    signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si((__v2df)__a);
-}
-
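-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_cvtsd_si32 rounds according to the
-   current MXCSR mode (round-to-nearest by default), while _mm_cvttsd_si32
-   always truncates toward zero. */
-static __inline__ void __example_round_vs_truncate(int *__r, int *__t)
-{
-  __m128d __v = __extension__ (__m128d){ 2.7, 0.0 };
-  *__r = _mm_cvtsd_si32(__v);  /* 3 under the default rounding mode */
-  *__t = _mm_cvttsd_si32(__v); /* 2, truncated regardless of MXCSR */
-}
-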
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
-}
-
-/// Converts the two signed 32-bit integer elements of a 64-bit vector of
-///    [2 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PD </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [2 x i32].
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
-  return __builtin_ia32_cvtpi2pd((__v2si)__a);
-}
-
-/// Returns the low-order element of a 128-bit vector of [2 x double] as
-///    a double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are returned.
-/// \returns A double-precision floating-point value copied from the lower 64
-///    bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
-  return __a[0];
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an aligned
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 16-byte aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
-  return *(const __m128d*)__dp;
-}
-
-/// Loads a double-precision floating-point value from a specified memory
-///    location and duplicates it to both vector elements of a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVDDUP </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-/// \returns A 128-bit vector of [2 x double] containing the loaded and
-///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
-  struct __mm_load1_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __u };
-}
-
-#define _mm_load_pd1(dp) _mm_load1_pd(dp)
-
-/// Loads two double-precision values, in reverse order, from an aligned
-///    memory location into a 128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction,
-/// plus any needed shuffling instructions. In AVX mode, the shuffle may be
-/// combined with the \c VMOVAPD, resulting in only a \c VPERMILPD instruction.
-///
-/// \param __dp
-///    A 16-byte aligned pointer to an array of double-precision values to be
-///    loaded in reverse order.
-/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
-///    values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
-  __m128d __u = *(const __m128d*)__dp;
-  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an
-///    unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
-  struct __loadu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_pd*)__dp)->__v;
-}
-
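-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_load_pd requires a 16-byte aligned
-   address, while _mm_loadu_pd accepts any address at a possible performance
-   cost. */
-static __inline__ __m128d __example_load_two(const double *__p, int __aligned)
-{
-  return __aligned ? _mm_load_pd(__p) : _mm_loadu_pd(__p);
-}
-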
-/// Loads a 64-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
-  struct __loadu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  long long __u = ((const struct __loadu_si64*)__a)->__v;
-  return __extension__ (__m128i)(__v2di){__u, 0LL};
-}
-
-/// Loads a 32-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
-  struct __loadu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  int __u = ((const struct __loadu_si32*)__a)->__v;
-  return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
-}
-
-/// Loads a 16-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __a
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
-  struct __loadu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  short __u = ((const struct __loadu_si16*)__a)->__v;
-  return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
-}
-
-/// Loads a 64-bit double-precision value to the low element of a
-///    128-bit vector of [2 x double] and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-///    The address of the memory location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
-  struct __mm_load_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, 0 };
-}
-
-/// Loads a double-precision value into the high-order bits of a 128-bit
-///    vector of [2 x double]. The low-order bits are copied from the low-order
-///    bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [127:64] of the result. The address of the memory location does not have
-///    to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __a[0], __u };
-}
-
-/// Loads a double-precision value into the low-order bits of a 128-bit
-///    vector of [2 x double]. The high-order bits are copied from the
-///    high-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [63:0] of the result. The address of the memory location does not have to
-///    be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadl_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __a[1] };
-}
-
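-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_load_sd and _mm_loadh_pd can gather
-   two doubles from unrelated addresses into one vector of [2 x double]. */
-static __inline__ __m128d __example_gather_pair(const double *__lo,
-                                                const double *__hi)
-{
-  __m128d __v = _mm_load_sd(__lo); /* { *__lo, 0.0 } */
-  return _mm_loadh_pd(__v, __hi);  /* { *__lo, *__hi } */
-}
-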
-/// Constructs a 128-bit floating-point vector of [2 x double] with
-///    unspecified content. This could be used as an argument to another
-///    intrinsic function where the argument is required but the value is not
-///    actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
-///    content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
-  return (__m128d)__builtin_ia32_undef128();
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits of the vector are initialized with the specified double-precision
-///    floating-point value. The upper 64 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
-///    lower 64 bits contain the value of the parameter. The upper 64 bits are
-///    set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
-  return __extension__ (__m128d){ __w, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
-  return __extension__ (__m128d){ __w, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
-  return _mm_set1_pd(__w);
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized with the specified double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __x, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double],
-///    initialized in reverse order with the specified double-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __w, __x };
-}
-
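-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_set_pd places its first argument in
-   the high lane, while _mm_setr_pd places it in the low lane. */
-static __inline__ double __example_set_order(void)
-{
-  __m128d __v = _mm_set_pd(2.0, 1.0); /* { 1.0, 2.0 } in element order */
-  return __v[0];                      /* 1.0, the low element */
-}
-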
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit floating-point vector of [2 x double] with
-///    all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
-  return __extension__ (__m128d){ 0, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits are set to the lower 64 bits of the second parameter. The upper
-///    64 bits are set to the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits are written to the
-///    upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower 64 bits are written to the
-///    lower 64 bits of the result.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
-  struct __mm_store_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
-}
-
-/// Stores two double-precision values from a 128-bit vector of
-///    [2 x double] to an aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
-  *(__m128d*)__dp = __a;
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to both
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-  _mm_store_pd(__dp, __a);
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to both
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
-  _mm_store1_pd(__dp, __a);
-}
-
-/// Stores a 128-bit vector of [2 x double] into an unaligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
-  struct __storeu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__dp)->__v = __a;
-}
-
-/// Stores two double-precision values, in reverse order, from a 128-bit
-///    vector of [2 x double] to a 16-byte aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to a shuffling instruction followed by a
-/// <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be reversed and
-///    stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
-  *(__m128d *)__dp = __a;
-}
-
-/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storel_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_pd_struct*)__dp)->__u = __a[0];
-}
-
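-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_storel_pd and _mm_storeh_pd split a
-   vector of [2 x double] into two separate scalars. */
-static __inline__ void __example_split_pd(double *__lo, double *__hi,
-                                          __m128d __v)
-{
-  _mm_storel_pd(__lo, __v); /* stores the low 64 bits */
-  _mm_storeh_pd(__hi, __v); /* stores the high 64 bits */
-}
-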
-/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
-///    saving the lower 8 bits of each sum in the corresponding element of a
-///    128-bit result vector of [16 x i8].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDB / PADDB </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-/// \param __b
-///    A 128-bit vector of [16 x i8].
-/// \returns A 128-bit vector of [16 x i8] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a + (__v16qu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [8 x i16],
-///    saving the lower 16 bits of each sum in the corresponding element of a
-///    128-bit result vector of [8 x i16].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDW / PADDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-/// \returns A 128-bit vector of [8 x i16] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a + (__v8hu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [4 x i32],
-///    saving the lower 32 bits of each sum in the corresponding element of a
-///    128-bit result vector of [4 x i32].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDD / PADDD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32].
-/// \param __b
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [4 x i32] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a + (__v4su)__b);
-}
-
-/// Adds two signed or unsigned 64-bit integer values, returning the
-///    lower 64 bits of the sum.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PADDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer.
-/// \param __b
-///    A 64-bit integer.
-/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
-///    saving the lower 64 bits of each sum in the corresponding element of a
-///    128-bit result vector of [2 x i64].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDQ / PADDQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64].
-/// \param __b
-///    A 128-bit vector of [2 x i64].
-/// \returns A 128-bit vector of [2 x i64] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a + (__v2du)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [16 x i8] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F
-///    (127) are saturated to 0x7F. Negative sums less than 0x80 (-128) are
-///    saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSB / PADDSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [16 x i8] vector.
-/// \param __b
-///    A 128-bit signed [16 x i8] vector.
-/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [8 x i16] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
-///    (32767) are saturated to 0x7FFF. Negative sums less than 0x8000 (-32768)
-///    are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSW / PADDSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [16 x i8] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [16 x i8]. Sums greater than 0xFF (255)
-///    are saturated to 0xFF; the operands are unsigned, so sums cannot be
-///    negative.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSB / PADDUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [8 x i16] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [8 x i16]. Sums greater than 0xFFFF
-///    (65535) are saturated to 0xFFFF; the operands are unsigned, so sums
-///    cannot be negative.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSW / PADDUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
-}
-
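-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): contrast between the wrapping add and
-   the saturating add. For bytes holding 200 and 100, the wrapping sum is
-   44 (300 mod 256) while the saturating unsigned sum clamps to 255. */
-static __inline__ void __example_wrap_vs_saturate(__m128i __x, __m128i __y,
-                                                  __m128i *__wrap,
-                                                  __m128i *__sat)
-{
-  *__wrap = _mm_add_epi8(__x, __y);  /* each byte sum wraps modulo 256 */
-  *__sat  = _mm_adds_epu8(__x, __y); /* each byte sum clamps to 0xFF */
-}
-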
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [16 x i8] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGB / PAVGB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [8 x i16] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGW / PAVGW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
-}
-
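-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_avg_epu8 computes (a + b + 1) >> 1
-   per byte without intermediate overflow, a common step when blending two
-   rows of 8-bit pixels. */
-static __inline__ __m128i __example_blend_rows(__m128i __row0, __m128i __row1)
-{
-  return _mm_avg_epu8(__row0, __row1);
-}
-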
-/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, producing eight intermediate 32-bit signed integer products, and
-///    adds the consecutive pairs of 32-bit products to form a 128-bit signed
-///    [4 x i32] vector.
-///
-///    For example, bits [15:0] of both parameters are multiplied producing a
-///    32-bit product, bits [31:16] of both parameters are multiplied producing
-///    a 32-bit product, and the sum of those two products becomes bits [31:0]
-///    of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMADDWD / PMADDWD </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
-}
-
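-/* Editor's illustration (a hedged sketch, not part of the original header;
-   the helper name is hypothetical): _mm_madd_epi16 is the core step of a
-   16-bit fixed-point dot product; lane 0 of the result holds
-   a[0]*b[0] + a[1]*b[1], and accumulating across vectors sums the rest. */
-static __inline__ __m128i __example_dot_accumulate(__m128i __acc, __m128i __a,
-                                                   __m128i __b)
-{
-  return _mm_add_epi32(__acc, _mm_madd_epi16(__a, __b));
-}
-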
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXSW / PMAXSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXUB / PMAXUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINSW / PMINSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINUB / PMINUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHW / PMULHW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two unsigned [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit unsigned [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHUW / PMULHUW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
-///    of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the lower 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULLW / PMULLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a * (__v8hu)__b);
-}
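-
-/* Usage sketch (illustrative): _mm_mullo_epi16 and _mm_mulhi_epi16 together
-   recover the full 32-bit products; interleaving the halves with
-   _mm_unpacklo_epi16 (declared later in this header) widens the low four
-   pairs of two given [8 x i16] vectors a and b.
-
-     __m128i lo   = _mm_mullo_epi16(a, b);
-     __m128i hi   = _mm_mulhi_epi16(a, b);
-     __m128i p0_3 = _mm_unpacklo_epi16(lo, hi);  // four full i32 products
-*/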
-
-/// Multiplies 32-bit unsigned integer values contained in the lower bits
-///    of the two 64-bit integer vectors and returns the 64-bit unsigned
-///    product.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer containing one of the source operands.
-/// \param __b
-///    A 64-bit integer containing one of the source operands.
-/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
-  return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
-}
-
-/// Multiplies 32-bit unsigned integer values contained in the lower
-///    bits of the corresponding elements of two [2 x i64] vectors, and returns
-///    the 64-bit products in the corresponding elements of a [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULUDQ / PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A [2 x i64] vector containing one of the source operands.
-/// \param __b
-///    A [2 x i64] vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
-}
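-
-/* Usage sketch (illustrative values): only bits [31:0] of each 64-bit
-   element participate in the multiplication.
-
-     __m128i a = _mm_set_epi32(0, 3, 0, 5);
-     __m128i b = _mm_set_epi32(0, 4, 0, 7);
-     __m128i p = _mm_mul_epu32(a, b);  // low lane: 5*7 = 35, high lane: 3*4 = 12
-*/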
-
-/// Computes the absolute differences of corresponding 8-bit integer
-///    values in two 128-bit vectors. Sums the first 8 absolute differences, and
-///    separately sums the second 8 absolute differences. Packs these two
-///    unsigned 16-bit integer sums into the upper and lower elements of a
-///    [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSADBW / PSADBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the sums of the sets of absolute
-///    differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
-}
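-
-/* Usage sketch (illustrative values): with every |a[i] - b[i]| equal to 5,
-   each group of eight bytes sums to 40.
-
-     __m128i a = _mm_set1_epi8(9);
-     __m128i b = _mm_set1_epi8(4);
-     __m128i s = _mm_sad_epu8(a, b);  // bits [15:0] = 40, bits [79:64] = 40
-*/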
-
-/// Subtracts the corresponding 8-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBB / PSUBB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a - (__v16qu)__b);
-}
-
-/// Subtracts the corresponding 16-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBW / PSUBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a - (__v8hu)__b);
-}
-
-/// Subtracts the corresponding 32-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBD / PSUBD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a - (__v4su)__b);
-}
-
-/// Subtracts signed or unsigned 64-bit integer values and writes the
-///    difference to the corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing the minuend.
-/// \param __b
-///    A 64-bit integer vector containing the subtrahend.
-/// \returns A 64-bit integer vector containing the difference of the values in
-///    the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
-}
-
-/// Subtracts the corresponding elements of two [2 x i64] vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBQ / PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a - (__v2du)__b);
-}
-
-/// Subtracts corresponding 8-bit signed integer values in the input and
-///    returns the differences in the corresponding bytes in the destination.
-///    Differences greater than 0x7F (127) are saturated to 0x7F, and
-///    differences less than 0x80 (-128) are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSB / PSUBSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit signed integer values in the input and
-///    returns the differences in the corresponding 16-bit elements of the
-///    destination. Differences greater than 0x7FFF (32767) are saturated to
-///    0x7FFF, and differences less than 0x8000 (-32768) are saturated to
-///    0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSW / PSUBSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Subtracts corresponding 8-bit unsigned integer values in the input
-///    and returns the differences in the corresponding bytes in the
-///    destination. Differences less than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSB / PSUBUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit unsigned integer values in the input
-///    and returns the differences in the corresponding 16-bit elements of the
-///    destination. Differences less than 0x0000 are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSW / PSUBUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
-}
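-
-/* Usage sketch (illustrative values): unsigned saturation clamps negative
-   differences to zero.
-
-     __m128i a = _mm_set1_epi16(10);
-     __m128i b = _mm_set1_epi16(25);
-     __m128i d = _mm_subs_epu16(a, b);  // every element: max(10 - 25, 0) = 0
-*/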
-
-/// Performs a bitwise AND of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise AND of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors, using the
-///    one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector containing the left source operand. The one's complement
-///    of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector containing the right source operand.
-/// \returns A 128-bit integer vector containing the bitwise AND of the one's
-///    complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)(~(__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise OR of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise exclusive OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
-///    values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a ^ (__v2du)__b);
-}
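-
-/* Usage sketch (illustrative): the AND/ANDNOT/OR trio implements a bitwise
-   select, (mask & x) | (~mask & y), for vectors x and y and a per-element
-   all-ones-or-all-zeros mask.
-
-     __m128i sel = _mm_or_si128(_mm_and_si128(mask, x),
-                                _mm_andnot_si128(mask, y));
-*/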
-
-/// Left-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_slli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSLLDQ / PSLLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to left-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
-  (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-
-#define _mm_bslli_si128(a, imm) \
-  (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
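-
-/* Usage sketch (illustrative): the shift count is in bytes and must be a
-   compile-time constant; counts above 15 produce zero.
-
-     __m128i x = _mm_set_epi32(3, 2, 1, 0);
-     __m128i y = _mm_slli_si128(x, 4);  // y = { 0, 0, 1, 2 } as [4 x i32]
-*/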
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
-}
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psllqi128((__v2di)__a, __count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
-}
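-
-/* Usage sketch (illustrative): the _mm_slli_* forms take the count as an
-   integer, while the _mm_sll_* forms read it from bits [63:0] of a vector;
-   both produce zero once the count reaches the element width.
-
-     __m128i a  = _mm_set1_epi32(1);
-     __m128i s1 = _mm_slli_epi32(a, 3);                    // every lane: 1 << 3 = 8
-     __m128i s2 = _mm_sll_epi32(a, _mm_cvtsi32_si128(3));  // same result
-*/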
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
-}
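-
-/* Usage sketch (illustrative values): arithmetic shifts replicate the sign
-   bit, so negative lanes stay negative.
-
-     __m128i a = _mm_set1_epi32(-8);
-     __m128i r = _mm_srai_epi32(a, 1);  // every lane: -8 >> 1 = -4
-*/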
-
-/// Right-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_srli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSRLDQ / PSRLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to right-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
-  (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-
-#define _mm_bsrli_si128(a, imm) \
-  (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-
-/// Right-shifts each of the 16-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each of the 16-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each of the 32-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each of the 32-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psrlqi128((__v2di)__a, __count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
-}
-
-/// Compares each of the corresponding 8-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false, 0xFF
-///    for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQB / PCMPEQB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qi)__a == (__v16qi)__b);
-}
-
-/// Compares each of the corresponding 16-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQW / PCMPEQW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a == (__v8hi)__b);
-}
-
-/// Compares each of the corresponding 32-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQD / PCMPEQD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a == (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are
-///    greater than those in the second operand. Each comparison yields 0x0 for
-///    false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
-  /* This function always performs a signed comparison, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)((__v16qs)__a > (__v16qs)__b);
-}
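-
-/* Usage sketch (illustrative): a comparison mask combined with the bitwise
-   select idiom computes a per-element maximum of two i8 vectors a and b
-   (the analogue of what _mm_max_epi16 does directly for i16).
-
-     __m128i m   = _mm_cmpgt_epi8(a, b);          // 0xFF where a > b, else 0x00
-     __m128i max = _mm_or_si128(_mm_and_si128(m, a),
-                                _mm_andnot_si128(m, b));
-*/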
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a > (__v8hi)__b);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a > (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are less
-///    than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi8(__b, __a);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi16(__b, __a);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi32(__b, __a);
-}
-
-#ifdef __x86_64__
-/// Converts a 64-bit signed integer value from the second operand into a
-///    double-precision value and returns it in the lower element of a [2 x
-///    double] vector; the upper element of the returned vector is copied from
-///    the upper element of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
-///    copied to the upper 64 bits of the destination.
-/// \param __b
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    converted value of the second operand. The upper 64 bits are copied from
-///    the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, according to the current rounding mode.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si64((__v2df)__a);
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si64((__v2df)__a);
-}
-#endif
-
-/// Converts a vector of [4 x i32] into a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PS / CVTDQ2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
-  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2DQ / CVTPS2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit integer vector of [4 x i32] containing the converted
-///    values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32],
-///    truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPS2DQ / CVTTPS2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
-}
-
-/// Returns a vector of [4 x i32] where the lowest element is the input
-///    operand and the remaining elements are zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A 32-bit signed integer operand.
-/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
-  return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
-}
-
-#ifdef __x86_64__
-/// Returns a vector of [2 x i64] where the lower element is the input
-///    operand and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
-  return __extension__ (__m128i)(__v2di){ __a, 0 };
-}
-#endif
-
-/// Moves the least significant 32 bits of a vector of [4 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A vector of [4 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
-  __v4si __b = (__v4si)__a;
-  return __b[0];
-}
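-
-/* Usage sketch: moving a scalar into a vector and back out round-trips the
-   value.
-
-     __m128i v = _mm_cvtsi32_si128(42);  // { 42, 0, 0, 0 } as [4 x i32]
-     int     r = _mm_cvtsi128_si32(v);   // r == 42
-*/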
-
-#ifdef __x86_64__
-/// Moves the least significant 64 bits of a vector of [2 x i64] to a
-///    64-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A vector of [2 x i64]. The least significant 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
-  return __a[0];
-}
-#endif
-
-/// Moves packed integer values from an aligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQA / MOVDQA </c> instruction.
-///
-/// \param __p
-///    An aligned pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
-  return *__p;
-}
-
-/// Moves packed integer values from an unaligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQU / MOVDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
-  struct __loadu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_si128*)__p)->__v;
-}
-
-/// Returns a vector of [2 x i64] where the lower element is taken from
-///    the lower element of the operand, and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location. The 64 bits at that location are
-///    written to bits [63:0] of the destination.
-/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
-///    moved value. The higher order bits are cleared.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
-  struct __mm_loadl_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
-}
-
-/// Generates a 128-bit vector of [4 x i32] with unspecified content.
-///    This could be used as an argument to another intrinsic function where the
-///    argument is required but the value is not actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
-  return (__m128i)__builtin_ia32_undef128();
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
-  return _mm_set_epi64x((long long)__q1, (long long)__q0);
-}
-
-/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
-///    the specified 32-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i3
-///    A 32-bit integer value used to initialize bits [127:96] of the
-///    destination vector.
-/// \param __i2
-///    A 32-bit integer value used to initialize bits [95:64] of the destination
-///    vector.
-/// \param __i1
-///    A 32-bit integer value used to initialize bits [63:32] of the destination
-///    vector.
-/// \param __i0
-///    A 32-bit integer value used to initialize bits [31:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
-  return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
-}
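-
-/* Usage sketch (illustrative): arguments are given from the most significant
-   element down, so the last argument lands in bits [31:0]; _mm_setr_epi32
-   (defined below) takes them in memory order instead.
-
-     __m128i v  = _mm_set_epi32(3, 2, 1, 0);
-     int     lo = _mm_cvtsi128_si32(v);  // lo == 0
-*/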
-
-/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
-///    the specified 16-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w7
-///    A 16-bit integer value used to initialize bits [127:112] of the
-///    destination vector.
-/// \param __w6
-///    A 16-bit integer value used to initialize bits [111:96] of the
-///    destination vector.
-/// \param __w5
-///    A 16-bit integer value used to initialize bits [95:80] of the destination
-///    vector.
-/// \param __w4
-///    A 16-bit integer value used to initialize bits [79:64] of the destination
-///    vector.
-/// \param __w3
-///    A 16-bit integer value used to initialize bits [63:48] of the destination
-///    vector.
-/// \param __w2
-///    A 16-bit integer value used to initialize bits [47:32] of the destination
-///    vector.
-/// \param __w1
-///    A 16-bit integer value used to initialize bits [31:16] of the destination
-///    vector.
-/// \param __w0
-///    A 16-bit integer value used to initialize bits [15:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
-  return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
-}
-
-/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
-///    the specified 8-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b15
-///    Initializes bits [127:120] of the destination vector.
-/// \param __b14
-///    Initializes bits [119:112] of the destination vector.
-/// \param __b13
-///    Initializes bits [111:104] of the destination vector.
-/// \param __b12
-///    Initializes bits [103:96] of the destination vector.
-/// \param __b11
-///    Initializes bits [95:88] of the destination vector.
-/// \param __b10
-///    Initializes bits [87:80] of the destination vector.
-/// \param __b9
-///    Initializes bits [79:72] of the destination vector.
-/// \param __b8
-///    Initializes bits [71:64] of the destination vector.
-/// \param __b7
-///    Initializes bits [63:56] of the destination vector.
-/// \param __b6
-///    Initializes bits [55:48] of the destination vector.
-/// \param __b5
-///    Initializes bits [47:40] of the destination vector.
-/// \param __b4
-///    Initializes bits [39:32] of the destination vector.
-/// \param __b3
-///    Initializes bits [31:24] of the destination vector.
-/// \param __b2
-///    Initializes bits [23:16] of the destination vector.
-/// \param __b1
-///    Initializes bits [15:8] of the destination vector.
-/// \param __b0
-///    Initializes bits [7:0] of the destination vector.
-/// \returns An initialized 128-bit vector of [16 x i8] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
-  return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
-}
-
-/// Initializes both values in a 128-bit integer vector with the
-///    specified 64-bit integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    Integer value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit integer vector of [2 x i64] with both
-///    elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
-  return _mm_set_epi64x(__q, __q);
-}
-
-/// Initializes both values in a 128-bit vector of [2 x i64] with the
-///    specified 64-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    A 64-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [2 x i64] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
-  return _mm_set_epi64(__q, __q);
-}
-
-/// Initializes all values in a 128-bit vector of [4 x i32] with the
-///    specified 32-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i
-///    A 32-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
-  return _mm_set_epi32(__i, __i, __i, __i);
-}
-
-/// Initializes all values in a 128-bit vector of [8 x i16] with the
-///    specified 16-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w
-///    A 16-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
-  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
-}
-
-/// Initializes all values in a 128-bit vector of [16 x i8] with the
-///    specified 8-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b
-///    An 8-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [16 x i8] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
-  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 64-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __q0
-///    A 64-bit integral value used to initialize the lower 64 bits of the
-///    result.
-/// \param __q1
-///    A 64-bit integral value used to initialize the upper 64 bits of the
-///    result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
-  return _mm_set_epi64(__q1, __q0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 32-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i0
-///    A 32-bit integral value used to initialize bits [31:0] of the result.
-/// \param __i1
-///    A 32-bit integral value used to initialize bits [63:32] of the result.
-/// \param __i2
-///    A 32-bit integral value used to initialize bits [95:64] of the result.
-/// \param __i3
-///    A 32-bit integral value used to initialize bits [127:96] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
-  return _mm_set_epi32(__i3, __i2, __i1, __i0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 16-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w0
-///    A 16-bit integral value used to initialize bits [15:0] of the result.
-/// \param __w1
-///    A 16-bit integral value used to initialize bits [31:16] of the result.
-/// \param __w2
-///    A 16-bit integral value used to initialize bits [47:32] of the result.
-/// \param __w3
-///    A 16-bit integral value used to initialize bits [63:48] of the result.
-/// \param __w4
-///    A 16-bit integral value used to initialize bits [79:64] of the result.
-/// \param __w5
-///    A 16-bit integral value used to initialize bits [95:80] of the result.
-/// \param __w6
-///    A 16-bit integral value used to initialize bits [111:96] of the result.
-/// \param __w7
-///    A 16-bit integral value used to initialize bits [127:112] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
-  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 8-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b0
-///    An 8-bit integral value used to initialize bits [7:0] of the result.
-/// \param __b1
-///    An 8-bit integral value used to initialize bits [15:8] of the result.
-/// \param __b2
-///    An 8-bit integral value used to initialize bits [23:16] of the result.
-/// \param __b3
-///    An 8-bit integral value used to initialize bits [31:24] of the result.
-/// \param __b4
-///    An 8-bit integral value used to initialize bits [39:32] of the result.
-/// \param __b5
-///    An 8-bit integral value used to initialize bits [47:40] of the result.
-/// \param __b6
-///    An 8-bit integral value used to initialize bits [55:48] of the result.
-/// \param __b7
-///    An 8-bit integral value used to initialize bits [63:56] of the result.
-/// \param __b8
-///    An 8-bit integral value used to initialize bits [71:64] of the result.
-/// \param __b9
-///    An 8-bit integral value used to initialize bits [79:72] of the result.
-/// \param __b10
-///    An 8-bit integral value used to initialize bits [87:80] of the result.
-/// \param __b11
-///    An 8-bit integral value used to initialize bits [95:88] of the result.
-/// \param __b12
-///    An 8-bit integral value used to initialize bits [103:96] of the result.
-/// \param __b13
-///    An 8-bit integral value used to initialize bits [111:104] of the result.
-/// \param __b14
-///    An 8-bit integral value used to initialize bits [119:112] of the result.
-/// \param __b15
-///    An 8-bit integral value used to initialize bits [127:120] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
-              char __b6, char __b7, char __b8, char __b9, char __b10,
-              char __b11, char __b12, char __b13, char __b14, char __b15)
-{
-  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
-                      __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
-}
-
-/// Creates a 128-bit integer vector initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit integer vector with all elements set to
-///    zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
-  return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
-}
-
-/// Stores a 128-bit integer vector to a memory location aligned on a
-///    128-bit boundary.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to an aligned memory location that will receive the integer
-///    values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
-  *__p = __b;
-}
-
-/// Stores a 128-bit integer vector to an unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the integer values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
-  struct __storeu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si128*)__p)->__v = __b;
-}
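-
-// Usage sketch (editor's illustration): _mm_store_si128 requires a 16-byte
-// aligned address, while _mm_storeu_si128 accepts any address.
-//
-//   __m128i v = _mm_set1_epi32(7);
-//   _Alignas(16) int aligned_buf[4];
-//   char unaligned_buf[17];
-//   _mm_store_si128((__m128i *)aligned_buf, v);
-//   _mm_storeu_si128((__m128i_u *)(unaligned_buf + 1), v);  // any alignment works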
-
-/// Stores a 64-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
-  struct __storeu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
-}
-
-/// Stores a 32-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
-  struct __storeu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
-}
-
-/// Stores a 16-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __p
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
-  struct __storeu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
-}
-
-/// Moves bytes selected by the mask from the first operand to the
-///    specified unaligned memory location. When a mask bit is 1, the
-///    corresponding byte is written; otherwise it is not written.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon). Exception and trap behavior for elements not selected
-///    for storage to memory is implementation dependent.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVDQU / MASKMOVDQU </c>
-///   instruction.
-///
-/// \param __d
-///    A 128-bit integer vector containing the values to be moved.
-/// \param __n
-///    A 128-bit integer vector containing the mask. The most significant bit of
-///    each byte represents the mask bits.
-/// \param __p
-///    A pointer to an unaligned 128-bit memory location where the specified
-///    values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
-  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
-}
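-
-// Usage sketch (editor's illustration): write only the bytes whose mask lane
-// has its most significant bit set; here, the low eight bytes of |v|.
-//
-//   char dst[16] = {0};
-//   __m128i v = _mm_set1_epi8(0x55);
-//   __m128i mask = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
-//                               -1, -1, -1, -1, -1, -1, -1, -1);
-//   _mm_maskmoveu_si128(v, mask, dst);  // bytes 0-7 of dst become 0x55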
-
-/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
-///    a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location that will receive the lower 64 bits
-///    of the integer vector parameter.
-/// \param __a
-///    A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
-///    value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
-  struct __mm_storel_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
-}
-
-/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
-///    aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPD / MOVNTPD </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
-  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
-}
-
-/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTDQ / MOVNTDQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
-  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
-}
-
-/// Stores a 32-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTI </c> instruction.
-///
-/// \param __p
-///    A pointer to the 32-bit memory location used to store the value.
-/// \param __a
-///    A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
-  __builtin_ia32_movnti(__p, __a);
-}
-
-#ifdef __x86_64__
-/// Stores a 64-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTIQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 64-bit memory location used to store the value.
-/// \param __a
-///    A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
-  __builtin_ia32_movnti64(__p, __a);
-}
-#endif
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// The cache line containing \a __p is flushed and invalidated from all
-///    caches in the coherency domain.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CLFLUSH </c> instruction.
-///
-/// \param __p
-///    A pointer to the memory location used to identify the cache line to be
-///    flushed.
-void _mm_clflush(void const * __p);
-
-/// Forces strong memory ordering (serialization) between load
-///    instructions preceding this instruction and load instructions following
-///    this instruction, ensuring the system completes all previous loads before
-///    executing subsequent loads.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> LFENCE </c> instruction.
-///
-void _mm_lfence(void);
-
-/// Forces strong memory ordering (serialization) between load and store
-///    instructions preceding this instruction and load and store instructions
-///    following this instruction, ensuring that the system completes all
-///    previous memory accesses before executing subsequent memory accesses.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MFENCE </c> instruction.
-///
-void _mm_mfence(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
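-
-// Usage sketch (editor's illustration; |dst| and |v| are assumed to be a
-// 16-byte-aligned buffer and an existing vector): non-temporal stores are
-// weakly ordered, so fence before other threads read the data.
-//
-//   _mm_stream_si128((__m128i *)dst, v);
-//   _mm_mfence();  // order the streaming store before subsequent accesses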
-
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit signed integers, and packs the results into the
-///    destination. Positive values greater than 0x7F are saturated to 0x7F.
-///    Negative values less than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
-///
-/// \param __a
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
-///   written to the lower 64 bits of the result.
-/// \param __b
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
-///   written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Converts 32-bit signed integers from both 128-bit integer vector
-///    operands into 16-bit signed integers, and packs the results into the
-///    destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-///    Negative values less than 0x8000 are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
-///    are written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
-///    are written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
-}
-
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit unsigned integers, and packs the results into the
-///    destination. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
-}
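-
-// Saturation sketch (editor's illustration): signed packing clamps each
-// element to [-128, 127], unsigned packing clamps to [0, 255].
-//
-//   __m128i w = _mm_set1_epi16(300);
-//   __m128i s = _mm_packs_epi16(w, w);   // every byte becomes 127 (0x7F)
-//   __m128i u = _mm_packus_epi16(w, w);  // every byte becomes 255 (0xFF)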
-
-/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
-///    the immediate-value parameter as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __imm
-///    An immediate value. Bits [2:0] select the value from \a __a to be
-///    assigned to bits [15:0] of the result. \n
-///    000: assign values from bits [15:0] of \a __a. \n
-///    001: assign values from bits [31:16] of \a __a. \n
-///    010: assign values from bits [47:32] of \a __a. \n
-///    011: assign values from bits [63:48] of \a __a. \n
-///    100: assign values from bits [79:64] of \a __a. \n
-///    101: assign values from bits [95:80] of \a __a. \n
-///    110: assign values from bits [111:96] of \a __a. \n
-///    111: assign values from bits [127:112] of \a __a.
-/// \returns An integer whose lower 16 bits are selected from the 128-bit
-///    integer vector parameter; the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
-  (int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
-                                                   (int)(imm))
-
-/// Constructs a 128-bit integer vector by first making a copy of the
-///    128-bit integer vector parameter, and then inserting the lower 16 bits
-///    of an integer parameter into an offset specified by the immediate-value
-///    parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. This vector is copied to the
-///    result and then one of the eight elements in the result is replaced by
-///    the lower 16 bits of \a __b.
-/// \param __b
-///    An integer. The lower 16 bits of this parameter are written to the
-///    result beginning at an offset specified by \a __imm.
-/// \param __imm
-///    An immediate value specifying the bit offset in the result at which the
-///    lower 16 bits of \a __b are written.
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
-  (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
-                                       (int)(imm))
-
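-// Usage sketch (editor's illustration): reading one 16-bit lane and writing a
-// replacement lane. Both selectors must be compile-time constants.
-//
-//   __m128i v = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);
-//   int lane3 = _mm_extract_epi16(v, 3);     // yields 13, zero-extended to int
-//   __m128i w = _mm_insert_epi16(v, 99, 3);  // lane 3 of w becomes 99
-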
-/// Copies the values of the most significant bits from each 8-bit
-///    element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
-///    value, zero-extends the value, and writes it to the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVMSKB / PMOVMSKB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bits from each 8-bit element in \a __a,
-///    written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
-  return __builtin_ia32_pmovmskb128((__v16qi)__a);
-}
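-
-// Usage sketch (editor's illustration; |haystack| is an assumed pointer to at
-// least 16 readable bytes): a common search idiom pairs a byte compare with
-// the movemask of the result.
-//
-//   __m128i chunk = _mm_loadu_si128((const __m128i_u *)haystack);
-//   __m128i eq = _mm_cmpeq_epi8(chunk, _mm_set1_epi8('\n'));
-//   int mask = _mm_movemask_epi8(eq);  // bit i is set iff haystack[i] == '\n'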
-
-/// Constructs a 128-bit integer vector by shuffling four 32-bit
-///    elements of a 128-bit integer vector parameter, using the immediate-value
-///    parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shuffle_epi32(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the values to be copied.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a.
-///    The destinations within the 128-bit result are assigned values as
-///    follows: \n
-///    Bits [1:0] are used to assign values to bits [31:0] of the result. \n
-///    Bits [3:2] are used to assign values to bits [63:32] of the result. \n
-///    Bits [5:4] are used to assign values to bits [95:64] of the result. \n
-///    Bits [7:6] are used to assign values to bits [127:96] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [31:0] of \a a. \n
-///    01: assign values from bits [63:32] of \a a. \n
-///    10: assign values from bits [95:64] of \a a. \n
-///    11: assign values from bits [127:96] of \a a.
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
-  (__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm))
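-
-// Usage sketch (editor's illustration): two selector bits per destination
-// element, so the immediate 0x1B (0b00011011) picks elements 3, 2, 1, 0 and
-// reverses the vector.
-//
-//   __m128i v = _mm_setr_epi32(1, 2, 3, 4);
-//   __m128i r = _mm_shuffle_epi32(v, 0x1B);  // r holds {4, 3, 2, 1}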
-
-/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the
-///    immediate-value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits
-///    [127:64] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits[1:0] are used to assign values to bits [15:0] of the result. \n
-///    Bits[3:2] are used to assign values to bits [31:16] of the result. \n
-///    Bits[5:4] are used to assign values to bits [47:32] of the result. \n
-///    Bits[7:6] are used to assign values to bits [63:48] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [15:0] of \a a. \n
-///    01: assign values from bits [31:16] of \a a. \n
-///    10: assign values from bits [47:32] of \a a. \n
-///    11: assign values from bits [63:48] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
-  (__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm))
-
-/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the
-///    immediate-value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFHW / PSHUFHW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits
-///    [63:0] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits[1:0] are used to assign values to bits [79:64] of the result. \n
-///    Bits[3:2] are used to assign values to bits [95:80] of the result. \n
-///    Bits[5:4] are used to assign values to bits [111:96] of the result. \n
-///    Bits[7:6] are used to assign values to bits [127:112] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [79:64] of \a a. \n
-///    01: assign values from bits [95:80] of \a a. \n
-///    10: assign values from bits [111:96] of \a a. \n
-///    11: assign values from bits [127:112] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
-  (__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm))
-
-/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
-///    of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHBW / PUNPCKHBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [71:64] are written to bits [7:0] of the result. \n
-///    Bits [79:72] are written to bits [23:16] of the result. \n
-///    Bits [87:80] are written to bits [39:32] of the result. \n
-///    Bits [95:88] are written to bits [55:48] of the result. \n
-///    Bits [103:96] are written to bits [71:64] of the result. \n
-///    Bits [111:104] are written to bits [87:80] of the result. \n
-///    Bits [119:112] are written to bits [103:96] of the result. \n
-///    Bits [127:120] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [71:64] are written to bits [15:8] of the result. \n
-///    Bits [79:72] are written to bits [31:24] of the result. \n
-///    Bits [87:80] are written to bits [47:40] of the result. \n
-///    Bits [95:88] are written to bits [63:56] of the result. \n
-///    Bits [103:96] are written to bits [79:72] of the result. \n
-///    Bits [111:104] are written to bits [95:88] of the result. \n
-///    Bits [119:112] are written to bits [111:104] of the result. \n
-///    Bits [127:120] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b,
-                                          8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11,
-                                          12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
-///    [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHWD / PUNPCKHWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16]. \n
-///    Bits [79:64] are written to bits [15:0] of the result. \n
-///    Bits [95:80] are written to bits [47:32] of the result. \n
-///    Bits [111:96] are written to bits [79:64] of the result. \n
-///    Bits [127:112] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16]. \n
-///    Bits [79:64] are written to bits [31:16] of the result. \n
-///    Bits [95:80] are written to bits [63:48] of the result. \n
-///    Bits [111:96] are written to bits [95:80] of the result. \n
-///    Bits [127:112] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
-}
-
-/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHDQ / PUNPCKHDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [31:0] of the destination. \n
-///    Bits [127:96] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [63:32] of the destination. \n
-///    Bits [127:96] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHQDQ / PUNPCKHQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
-///    [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLBW / PUNPCKLBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [7:0] are written to bits [7:0] of the result. \n
-///    Bits [15:8] are written to bits [23:16] of the result. \n
-///    Bits [23:16] are written to bits [39:32] of the result. \n
-///    Bits [31:24] are written to bits [55:48] of the result. \n
-///    Bits [39:32] are written to bits [71:64] of the result. \n
-///    Bits [47:40] are written to bits [87:80] of the result. \n
-///    Bits [55:48] are written to bits [103:96] of the result. \n
-///    Bits [63:56] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [7:0] are written to bits [15:8] of the result. \n
-///    Bits [15:8] are written to bits [31:24] of the result. \n
-///    Bits [23:16] are written to bits [47:40] of the result. \n
-///    Bits [31:24] are written to bits [63:56] of the result. \n
-///    Bits [39:32] are written to bits [79:72] of the result. \n
-///    Bits [47:40] are written to bits [95:88] of the result. \n
-///    Bits [55:48] are written to bits [111:104] of the result. \n
-///    Bits [63:56] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b,
-                                          0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3,
-                                          4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
-}
-
-/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
-///    vectors of [8 x i16] and interleaves them into a 128-bit vector of
-///    [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLWD / PUNPCKLWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16]. \n
-///    Bits [15:0] are written to bits [15:0] of the result. \n
-///    Bits [31:16] are written to bits [47:32] of the result. \n
-///    Bits [47:32] are written to bits [79:64] of the result. \n
-///    Bits [63:48] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16]. \n
-///    Bits [15:0] are written to bits [31:16] of the result. \n
-///    Bits [31:16] are written to bits [63:48] of the result. \n
-///    Bits [47:32] are written to bits [95:80] of the result. \n
-///    Bits [63:48] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
-}
-
-/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLDQ / PUNPCKLDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [31:0] of the destination. \n
-///    Bits [63:32] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [63:32] of the destination. \n
-///    Bits [63:32] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination. \n
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination. \n
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
-}
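-
-// Usage sketch (editor's illustration; |src| is an assumed pointer to 16
-// readable bytes): interleaving with a zero vector is a common way to widen
-// unsigned elements, here bytes to 16-bit words.
-//
-//   __m128i bytes = _mm_loadu_si128((const __m128i_u *)src);
-//   __m128i zero  = _mm_setzero_si128();
-//   __m128i lo    = _mm_unpacklo_epi8(bytes, zero);  // bytes 0-7 widened to words
-//   __m128i hi    = _mm_unpackhi_epi8(bytes, zero);  // bytes 8-15 widened to words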
-
-/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
-///    integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVDQ2Q </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
-  return (__m64)__a[0];
-}
-
-/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the
-///    upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVD+VMOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit value.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
-  return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
-}
-
-/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
-///    integer vector, zeroing the upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors
-///    of [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
-}
-
-/// Extracts the sign bits of the double-precision values in the 128-bit
-///    vector of [2 x double], zero-extends the value, and writes it to the
-///    low-order bits of the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPD / MOVMSKPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values with sign bits to
-///    be extracted.
-/// \returns The sign bits from each of the double-precision elements in \a __a,
-///    written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
-  return __builtin_ia32_movmskpd((__v2df)__a);
-}
-
-
-/// Constructs a 128-bit floating-point vector of [2 x double] from two
-///    128-bit vector parameters of [2 x double], using the immediate-value
-///     parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPD / SHUFPD </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [2 x double].
-/// \param b
-///    A 128-bit vector of [2 x double].
-/// \param i
-///    An 8-bit immediate value. The least significant two bits specify which
-///    elements to copy from \a a and \a b: \n
-///    Bit[0] = 0: lower element of \a a copied to lower element of result. \n
-///    Bit[0] = 1: upper element of \a a copied to lower element of result. \n
-///    Bit[1] = 0: lower element of \a b copied to upper element of result. \n
-///    Bit[1] = 1: upper element of \a b copied to upper element of result. \n
-/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
-  (__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
-                                 (int)(i))
-
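-// Usage sketch (editor's illustration; |a| is an assumed __m128d value): with
-// the _MM_SHUFFLE2 helper defined at the end of this header, the selector
-// (0, 1) takes the upper element of the first operand and the lower element
-// of the second, swapping the two lanes of a single vector.
-//
-//   __m128d swapped = _mm_shuffle_pd(a, a, _MM_SHUFFLE2(0, 1));
-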
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    floating-point vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    floating-point vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
-  return (__m128d)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
-  return (__m128d)__a;
-}
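-
-// Usage sketch (editor's illustration): the casts reinterpret bits without
-// any conversion, which enables bitwise tricks on floating-point data.
-//
-//   __m128d x = _mm_set1_pd(-1.5);
-//   __m128i sign = _mm_set1_epi64x((long long)0x8000000000000000ULL);
-//   __m128d abs_x = _mm_castsi128_pd(
-//       _mm_andnot_si128(sign, _mm_castpd_si128(x)));  // clears each sign bit: 1.5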
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Indicates that a spin loop is being executed for the purposes of
-///    optimizing power consumption during the loop.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAUSE </c> instruction.
-///
-void _mm_pause(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_MMX
-
-#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-
-#define _MM_DENORMALS_ZERO_ON   (0x0040)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000)
-
-#define _MM_DENORMALS_ZERO_MASK (0x0040)
-
-#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
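-
-// Usage sketch (editor's illustration): enabling denormals-are-zero mode via
-// the MXCSR control register.
-//
-//   _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
-//   if (_MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON) {
-//     /* subsequent SSE arithmetic treats denormal inputs as zero */
-//   }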
-
-#endif /* __EMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h
deleted file mode 100644
index 3e069eb..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/fuzzer/FuzzedDataProvider.h
+++ /dev/null
@@ -1,305 +0,0 @@
-//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// A single header library providing a utility class to break up an array of
-// bytes. Whenever run on the same input, provides the same output, as long as
-// its methods are called in the same order, with the same arguments.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-
-#include <algorithm>
-#include <climits>
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <initializer_list>
-#include <string>
-#include <type_traits>
-#include <utility>
-#include <vector>
-
-// In addition to the comments below, the API is also briefly documented at
-// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
-class FuzzedDataProvider {
- public:
-  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
-  // provide more granular access. |data| must outlive the FuzzedDataProvider.
-  FuzzedDataProvider(const uint8_t *data, size_t size)
-      : data_ptr_(data), remaining_bytes_(size) {}
-  ~FuzzedDataProvider() = default;
-
-  // Returns a std::vector containing |num_bytes| of input data. If fewer than
-  // |num_bytes| of data remain, returns a shorter std::vector containing all
-  // of the data that's left. Can be used with any byte-sized type, such as
-  // char, unsigned char, uint8_t, etc.
-  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    return ConsumeBytes<T>(num_bytes, num_bytes);
-  }
-
-  // Similar to |ConsumeBytes|, but also appends the terminator value at the
-  // end of the resulting vector. Useful when a mutable null-terminated
-  // C-string is needed, for example. But that is a rare case; if possible,
-  // prefer |ConsumeBytes| or |ConsumeBytesAsString| instead.
-  template <typename T>
-  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes,
-                                            T terminator = 0) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
-    result.back() = terminator;
-    return result;
-  }
-
-  // Returns a std::string containing |num_bytes| of input data. Using this and
-  // |.c_str()| on the resulting string is the best way to get an immutable
-  // null-terminated C string. If fewer than |num_bytes| of data remain, returns
-  // a shorter std::string containing all of the data that's left.
-  std::string ConsumeBytesAsString(size_t num_bytes) {
-    static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
-                  "ConsumeBytesAsString cannot convert the data to a string.");
-
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::string result(
-        reinterpret_cast<const std::string::value_type *>(data_ptr_),
-        num_bytes);
-    Advance(num_bytes);
-    return result;
-  }
-
-  // Returns a number in the range [min, max] by consuming bytes from the
-  // input data. The value might not be uniformly distributed in the given
-  // range. If there's no input data left, always returns |min|. |min| must
-  // be less than or equal to |max|.
-  template <typename T> T ConsumeIntegralInRange(T min, T max) {
-    static_assert(std::is_integral<T>::value, "An integral type is required.");
-    static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
-
-    if (min > max)
-      abort();
-
-    // Use the biggest type possible to hold the range and the result.
-    uint64_t range = static_cast<uint64_t>(max) - min;
-    uint64_t result = 0;
-    size_t offset = 0;
-
-    while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
-           remaining_bytes_ != 0) {
-      // Pull bytes off the end of the seed data. Experimentally, this seems to
-      // allow the fuzzer to more easily explore the input space. This makes
-      // sense, since it works by modifying inputs that caused new code to run,
-      // and this data is often used to encode length of data read by
-      // |ConsumeBytes|. Separating out read lengths makes it easier to modify
-      // the
-      // contents of the data that is actually read.
-      --remaining_bytes_;
-      result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
-      offset += CHAR_BIT;
-    }
-
-    // Avoid division by 0, in case |range + 1| results in overflow.
-    if (range != std::numeric_limits<decltype(range)>::max())
-      result = result % (range + 1);
-
-    return static_cast<T>(min + result);
-  }
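-
-  // Usage sketch (editor's illustration, assuming the standard libFuzzer
-  // entry point; |DoStuff| is a hypothetical function under test):
-  //
-  //   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
-  //     FuzzedDataProvider provider(data, size);
-  //     int count = provider.ConsumeIntegralInRange<int>(0, 64);
-  //     std::string name = provider.ConsumeRandomLengthString(16);
-  //     DoStuff(name, count);
-  //     return 0;
-  //   }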
-
-  // Returns a std::string of length from 0 to |max_length|. When it runs out of
-  // input data, returns what remains of the input. Designed to be more stable
-  // with respect to a fuzzer inserting characters than just picking a random
-  // length and then consuming that many bytes with |ConsumeBytes|.
-  std::string ConsumeRandomLengthString(size_t max_length) {
-    // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
-    // followed by anything else to the end of the string. As a result of this
-    // logic, a fuzzer can insert characters into the string, and the string
-    // will be lengthened to include those new characters, resulting in a more
-    // stable fuzzer than picking the length of a string independently from
-    // picking its contents.
-    std::string result;
-
-    // Reserve the anticipated capacity to prevent several reallocations.
-    result.reserve(std::min(max_length, remaining_bytes_));
-    for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
-      char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-      Advance(1);
-      if (next == '\\' && remaining_bytes_ != 0) {
-        next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-        Advance(1);
-        if (next != '\\')
-          break;
-      }
-      result += next;
-    }
-
-    result.shrink_to_fit();
-    return result;
-  }
-
-  // Returns a std::vector containing all remaining bytes of the input data.
-  template <typename T> std::vector<T> ConsumeRemainingBytes() {
-    return ConsumeBytes<T>(remaining_bytes_);
-  }
-
-  // Returns a std::string containing all remaining bytes of the input data.
-  // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
-  // object.
-  std::string ConsumeRemainingBytesAsString() {
-    return ConsumeBytesAsString(remaining_bytes_);
-  }
-
-  // Returns a number in the range [Type's min, Type's max]. The value might
-  // not be uniformly distributed in the given range. If there's no input data
-  // left, always returns the minimum value of the type.
-  template <typename T> T ConsumeIntegral() {
-    return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
-                                  std::numeric_limits<T>::max());
-  }
-
-  // Reads one byte and returns a bool, or false when no data remains.
-  bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); }
-
-  // Returns a copy of the value selected from the given fixed-size |array|.
-  template <typename T, size_t size>
-  T PickValueInArray(const T (&array)[size]) {
-    static_assert(size > 0, "The array must be non empty.");
-    return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
-  }
-
-  template <typename T>
-  T PickValueInArray(std::initializer_list<const T> list) {
-    // TODO(Dor1s): switch to static_assert once C++14 is allowed.
-    if (!list.size())
-      abort();
-
-    return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
-  }
-
-  // Returns an enum value. The enum must start at 0 and be contiguous. It must
-  // also contain |kMaxValue| aliased to its largest (inclusive) value, such as:
-  // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
-  template <typename T> T ConsumeEnum() {
-    static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
-    return static_cast<T>(ConsumeIntegralInRange<uint32_t>(
-        0, static_cast<uint32_t>(T::kMaxValue)));
-  }
-
-  // Returns a floating point number in the range [0.0, 1.0]. If there's no
-  // input data left, always returns 0.
-  template <typename T> T ConsumeProbability() {
-    static_assert(std::is_floating_point<T>::value,
-                  "A floating point type is required.");
-
-    // Use different integral types for different floating point types in order
-    // to provide better density of the resulting values.
-    using IntegralType =
-        typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
-                                  uint64_t>::type;
-
-    T result = static_cast<T>(ConsumeIntegral<IntegralType>());
-    result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
-    return result;
-  }
-
-  // Returns a floating point value in the range [Type's lowest, Type's max] by
-  // consuming bytes from the input data. If there's no input data left, always
-  // returns approximately 0.
-  template <typename T> T ConsumeFloatingPoint() {
-    return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
-                                          std::numeric_limits<T>::max());
-  }
-
-  // Returns a floating point value in the given range by consuming bytes from
-  // the input data. If there's no input data left, returns |min|. Note that
-  // |min| must be less than or equal to |max|.
-  template <typename T> T ConsumeFloatingPointInRange(T min, T max) {
-    if (min > max)
-      abort();
-
-    T range = .0;
-    T result = min;
-    constexpr T zero(.0);
-    if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
-      // The diff |max - min| would overflow the given floating point type. Use
-      // the half of the diff as the range and consume a bool to decide whether
-      // the result is in the first or the second part of the diff.
-      range = (max / 2.0) - (min / 2.0);
-      if (ConsumeBool()) {
-        result += range;
-      }
-    } else {
-      range = max - min;
-    }
-
-    return result + range * ConsumeProbability<T>();
-  }
-
-  // Reports the remaining bytes available for fuzzed input.
-  size_t remaining_bytes() { return remaining_bytes_; }
-
- private:
-  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
-  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
-
-  void Advance(size_t num_bytes) {
-    if (num_bytes > remaining_bytes_)
-      abort();
-
-    data_ptr_ += num_bytes;
-    remaining_bytes_ -= num_bytes;
-  }
-
-  template <typename T>
-  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes_to_consume) {
-    static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
-
-    // The point of using the size-based constructor below is to increase the
-    // odds of having a vector object with capacity being equal to the length.
-    // That part is always implementation specific, but at least both libc++ and
-    // libstdc++ allocate the requested number of bytes in that constructor,
-    // which seems to be a natural choice for other implementations as well.
-    // To increase the odds even more, we also call |shrink_to_fit| below.
-    std::vector<T> result(size);
-    if (size == 0) {
-      if (num_bytes_to_consume != 0)
-        abort();
-      return result;
-    }
-
-    std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
-    Advance(num_bytes_to_consume);
-
-    // Even though |shrink_to_fit| is also implementation specific, we expect it
-    // to provide an additional assurance in case vector's constructor allocated
-    // a buffer which is larger than the actual amount of data we put inside it.
-    result.shrink_to_fit();
-    return result;
-  }
-
-  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value) {
-    static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
-    static_assert(!std::numeric_limits<TU>::is_signed,
-                  "Source type must be unsigned.");
-
-    // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
-    if (std::numeric_limits<TS>::is_modulo)
-      return static_cast<TS>(value);
-
-    // Avoid using implementation-defined unsigned to signed conversions.
-    // To learn more, see https://stackoverflow.com/questions/13150449.
-    if (value <= std::numeric_limits<TS>::max()) {
-      return static_cast<TS>(value);
-    } else {
-      constexpr auto TS_min = std::numeric_limits<TS>::min();
-      return TS_min + static_cast<TS>(value - TS_min);
-    }
-  }
-
-  const uint8_t *data_ptr_;
-  size_t remaining_bytes_;
-};
-
-#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
diff --git a/linux-x86/lib64/clang/11.0.1/include/immintrin.h b/linux-x86/lib64/clang/11.0.1/include/immintrin.h
deleted file mode 100644
index edf8c42..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/immintrin.h
+++ /dev/null
@@ -1,521 +0,0 @@
-/*===---- immintrin.h - Intel intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#define __IMMINTRIN_H
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
-#include <mmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
-#include <xmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
-#include <emmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
-#include <pmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
-#include <tmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__SSE4_2__) || defined(__SSE4_1__))
-#include <smmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AES__) || defined(__PCLMUL__))
-#include <wmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
-#include <clflushoptintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLWB__)
-#include <clwbintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
-#include <avxintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
-#include <avx2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
-#include <f16cintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
-#include <vpclmulqdqintrin.h>
-#endif
-
-/* No feature check desired due to internal checks */
-#include <bmiintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
-#include <bmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
-#include <lzcntintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
-#include <popcntintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
-#include <fmaintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
-#include <avx512fintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
-#include <avx512vlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
-#include <avx512bwintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BITALG__)
-#include <avx512bitalgintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
-#include <avx512cdintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
-#include <avx512vpopcntdqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
-#include <avx512vpopcntdqvlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VNNI__)
-#include <avx512vnniintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512VNNI__))
-#include <avx512vlvnniintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
-#include <avx512dqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512BITALG__))
-#include <avx512vlbitalgintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512BW__))
-#include <avx512vlbwintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512CD__))
-#include <avx512vlcdintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512DQ__))
-#include <avx512vldqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
-#include <avx512erintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
-#include <avx512ifmaintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512IFMA__) && defined(__AVX512VL__))
-#include <avx512ifmavlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
-#include <avx512vbmiintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VBMI__) && defined(__AVX512VL__))
-#include <avx512vbmivlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
-#include <avx512vbmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
-#include <avx512vlvbmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
-#include <avx512pfintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
-#include <avx512bf16intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512BF16__))
-#include <avx512vlbf16intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
-#include <pkuintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VAES__)
-#include <vaesintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__GFNI__)
-#include <gfniintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
-/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> RDPID </c> instruction.
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid")))
-_rdpid_u32(void) {
-  return __builtin_ia32_rdpid();
-}
-#endif // __RDPID__
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand16_step(unsigned short *__p)
-{
-  return __builtin_ia32_rdrand16_step(__p);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand32_step(unsigned int *__p)
-{
-  return __builtin_ia32_rdrand32_step(__p);
-}
-
-#ifdef __x86_64__
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand64_step(unsigned long long *__p)
-{
-  return __builtin_ia32_rdrand64_step(__p);
-}
-#endif
-#endif /* __RDRND__ */
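The _rdrand*_step intrinsics above return 1 on success and 0 when the hardware DRNG has no data ready, so callers are expected to retry. A minimal sketch, assuming a CPU with RDRAND and compilation with -mrdrnd:

#include <immintrin.h>

static int random_u32(unsigned int *out) {
  /* RDRAND can transiently fail; retry a bounded number of times and
   * report failure so the caller can fall back to another source. */
  for (int i = 0; i < 10; ++i) {
    if (_rdrand32_step(out))
      return 1;
  }
  return 0;
}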
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
-#ifdef __x86_64__
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u32(void)
-{
-  return __builtin_ia32_rdfsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u64(void)
-{
-  return __builtin_ia32_rdfsbase64();
-}
-
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u32(void)
-{
-  return __builtin_ia32_rdgsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u64(void)
-{
-  return __builtin_ia32_rdgsbase64();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrfsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrfsbase64(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrgsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrgsbase64(__V);
-}
-
-#endif
-#endif /* __FSGSBASE__ */
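The FSGSBASE accessors above (x86-64 only) issue RDFSBASE/WRFSBASE and friends directly, which fault unless the OS has set CR4.FSGSBASE; on Linux that means kernel 5.9 or newer. A sketch, assuming -mfsgsbase and such a kernel:

#include <immintrin.h>

/* Raises SIGILL if the kernel has not enabled CR4.FSGSBASE. */
static unsigned long long current_fs_base(void) {
  return _readfsbase_u64();
}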
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
-
-/* The structs used below force the loads/stores to be treated as unaligned,
- * which is accomplished with the __packed__ attribute. The __may_alias__
- * attribute prevents TBAA metadata from being generated based on the struct
- * and the type of the field inside of it.
- */
-
-static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i16(void const * __P) {
-  struct __loadu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i16(void * __P, short __D) {
-  struct __storeu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i32(void const * __P) {
-  struct __loadu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i32(void * __P, int __D) {
-  struct __storeu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i64(void const * __P) {
-  struct __loadu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i64(void * __P, long long __D) {
-  struct __storeu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
-}
-#endif
-#endif /* __MOVBE__ */
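The _loadbe_*/_storebe_* wrappers above pair an unaligned access with a byte swap, which is convenient for big-endian wire formats. A small sketch, assuming -mmovbe:

#include <immintrin.h>

/* Read a big-endian 32-bit length prefix from a packet buffer; the
 * packed/may_alias structs inside the intrinsic make the unaligned,
 * type-punned load well defined. */
static int read_be_length(const unsigned char *buf) {
  return _loadbe_i32(buf);
}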
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
-#include <rtmintrin.h>
-#include <xtestintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
-#include <shaintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
-#include <fxsrintrin.h>
-#endif
-
-/* No feature check desired due to internal MSC_VER checks */
-#include <xsaveintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
-#include <xsaveoptintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
-#include <xsavecintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
-#include <xsavesintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHSTK__)
-#include <cetintrin.h>
-#endif
-
-/* Some intrinsics inside adxintrin.h are available only on processors with
- * ADX, whereas others are available at all times. */
-#include <adxintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
-#include <rdseedintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
-#include <wbnoinvdintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
-#include <cldemoteintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
-#include <waitpkgintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__MOVDIRI__) || defined(__MOVDIR64B__)
-#include <movdirintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
-#include <pconfigintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
-#include <sgxintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
-#include <ptwriteintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
-#include <invpcidintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__AVX512VP2INTERSECT__)
-#include <avx512vp2intersectintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
-#include <avx512vlvp2intersectintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
-#include <enqcmdintrin.h>
-#endif
-
-#if defined(_MSC_VER) && __has_extension(gnu_asm)
-/* Define the default attributes for these intrinsics */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
-#ifdef __cplusplus
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */
-
-#endif /* __IMMINTRIN_H */
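A note on the guard pattern used throughout this header: when _MSC_VER is not defined the first clause is always true, so GNU-style builds see every sub-header and rely on each intrinsic's __target__ attribute for enforcement, while MSVC-compatible builds only expose a sub-header when the matching feature macro (or modules) is enabled. Either way, user code typically guards ISA-specific paths explicitly, as in this illustrative sketch:

#include <immintrin.h>

#if defined(__AVX2__)
/* Compiled only when -mavx2 (or an -march implying it) is in effect. */
static void add8_i32(int *dst, const int *a, const int *b) {
  __m256i va = _mm256_loadu_si256((const __m256i *)a);
  __m256i vb = _mm256_loadu_si256((const __m256i *)b);
  _mm256_storeu_si256((__m256i *)dst, _mm256_add_epi32(va, vb));
}
#endif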
diff --git a/linux-x86/lib64/clang/11.0.1/include/intrin.h b/linux-x86/lib64/clang/11.0.1/include/intrin.h
deleted file mode 100644
index f85f7a2..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/intrin.h
+++ /dev/null
@@ -1,595 +0,0 @@
-/* ===-------- intrin.h ---------------------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Only include this if we're compiling for the Windows platform. */
-#ifndef _MSC_VER
-#include_next <intrin.h>
-#else
-
-#ifndef __INTRIN_H
-#define __INTRIN_H
-
-/* First include the standard intrinsics. */
-#if defined(__i386__) || defined(__x86_64__)
-#include <x86intrin.h>
-#endif
-
-#if defined(__arm__)
-#include <armintr.h>
-#endif
-
-#if defined(__aarch64__)
-#include <arm64intr.h>
-#endif
-
-/* For the definition of jmp_buf. */
-#if __STDC_HOSTED__
-#include <setjmp.h>
-#endif
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-#if __x86_64__
-#define __LPTRINT_TYPE__ __int64
-#else
-#define __LPTRINT_TYPE__ long
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if defined(__MMX__)
-/* And the random ones that aren't in those files. */
-__m64 _m_from_float(float);
-float _m_to_float(__m64);
-#endif
-
-/* Other assorted instruction intrinsics. */
-void __addfsbyte(unsigned long, unsigned char);
-void __addfsdword(unsigned long, unsigned long);
-void __addfsword(unsigned long, unsigned short);
-void __code_seg(const char *);
-static __inline__
-void __cpuid(int[4], int);
-static __inline__
-void __cpuidex(int[4], int, int);
-static __inline__
-__int64 __emul(int, int);
-static __inline__
-unsigned __int64 __emulu(unsigned int, unsigned int);
-unsigned int __getcallerseflags(void);
-static __inline__
-void __halt(void);
-unsigned char __inbyte(unsigned short);
-void __inbytestring(unsigned short, unsigned char *, unsigned long);
-void __incfsbyte(unsigned long);
-void __incfsdword(unsigned long);
-void __incfsword(unsigned long);
-unsigned long __indword(unsigned short);
-void __indwordstring(unsigned short, unsigned long *, unsigned long);
-void __int2c(void);
-void __invlpg(void *);
-unsigned short __inword(unsigned short);
-void __inwordstring(unsigned short, unsigned short *, unsigned long);
-void __lidt(void *);
-unsigned __int64 __ll_lshift(unsigned __int64, int);
-__int64 __ll_rshift(__int64, int);
-static __inline__
-void __movsb(unsigned char *, unsigned char const *, size_t);
-static __inline__
-void __movsd(unsigned long *, unsigned long const *, size_t);
-static __inline__
-void __movsw(unsigned short *, unsigned short const *, size_t);
-static __inline__
-void __nop(void);
-void __nvreg_restore_fence(void);
-void __nvreg_save_fence(void);
-void __outbyte(unsigned short, unsigned char);
-void __outbytestring(unsigned short, unsigned char *, unsigned long);
-void __outdword(unsigned short, unsigned long);
-void __outdwordstring(unsigned short, unsigned long *, unsigned long);
-void __outword(unsigned short, unsigned short);
-void __outwordstring(unsigned short, unsigned short *, unsigned long);
-unsigned long __readcr0(void);
-unsigned long __readcr2(void);
-unsigned __LPTRINT_TYPE__ __readcr3(void);
-unsigned long __readcr4(void);
-unsigned long __readcr8(void);
-unsigned int __readdr(unsigned int);
-#ifdef __i386__
-static __inline__
-unsigned char __readfsbyte(unsigned long);
-static __inline__
-unsigned __int64 __readfsqword(unsigned long);
-static __inline__
-unsigned short __readfsword(unsigned long);
-#endif
-static __inline__
-unsigned __int64 __readmsr(unsigned long);
-unsigned __int64 __readpmc(unsigned long);
-unsigned long __segmentlimit(unsigned long);
-void __sidt(void *);
-static __inline__
-void __stosb(unsigned char *, unsigned char, size_t);
-static __inline__
-void __stosd(unsigned long *, unsigned long, size_t);
-static __inline__
-void __stosw(unsigned short *, unsigned short, size_t);
-void __svm_clgi(void);
-void __svm_invlpga(void *, int);
-void __svm_skinit(int);
-void __svm_stgi(void);
-void __svm_vmload(size_t);
-void __svm_vmrun(size_t);
-void __svm_vmsave(size_t);
-void __ud2(void);
-unsigned __int64 __ull_rshift(unsigned __int64, int);
-void __vmx_off(void);
-void __vmx_vmptrst(unsigned __int64 *);
-void __wbinvd(void);
-void __writecr0(unsigned int);
-static __inline__
-void __writecr3(unsigned __INTPTR_TYPE__);
-void __writecr4(unsigned int);
-void __writecr8(unsigned int);
-void __writedr(unsigned int, unsigned int);
-void __writefsbyte(unsigned long, unsigned char);
-void __writefsdword(unsigned long, unsigned long);
-void __writefsqword(unsigned long, unsigned __int64);
-void __writefsword(unsigned long, unsigned short);
-void __writemsr(unsigned long, unsigned __int64);
-static __inline__
-void *_AddressOfReturnAddress(void);
-static __inline__
-unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
-static __inline__
-unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
-unsigned char _bittest(long const *, long);
-unsigned char _bittestandcomplement(long *, long);
-unsigned char _bittestandreset(long *, long);
-unsigned char _bittestandset(long *, long);
-void __cdecl _disable(void);
-void __cdecl _enable(void);
-long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
-unsigned char _interlockedbittestandreset(long volatile *, long);
-unsigned char _interlockedbittestandset(long volatile *, long);
-void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
-                                                    void *);
-void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
-                                                    void *);
-long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
-long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
-__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
-__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadBarrier(void);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadWriteBarrier(void);
-unsigned int _rorx_u32(unsigned int, const unsigned int);
-int _sarx_i32(int, unsigned int);
-#if __STDC_HOSTED__
-int __cdecl _setjmp(jmp_buf);
-#endif
-unsigned int _shlx_u32(unsigned int, unsigned int);
-unsigned int _shrx_u32(unsigned int, unsigned int);
-void _Store_HLERelease(long volatile *, long);
-void _Store64_HLERelease(__int64 volatile *, __int64);
-void _StorePointer_HLERelease(void *volatile *, void *);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_WriteBarrier(void);
-unsigned __int32 xbegin(void);
-void _xend(void);
-
-/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
-#ifdef __x86_64__
-void __addgsbyte(unsigned long, unsigned char);
-void __addgsdword(unsigned long, unsigned long);
-void __addgsqword(unsigned long, unsigned __int64);
-void __addgsword(unsigned long, unsigned short);
-static __inline__
-void __faststorefence(void);
-void __incgsbyte(unsigned long);
-void __incgsdword(unsigned long);
-void __incgsqword(unsigned long);
-void __incgsword(unsigned long);
-static __inline__
-void __movsq(unsigned long long *, unsigned long long const *, size_t);
-static __inline__
-unsigned char __readgsbyte(unsigned long);
-static __inline__
-unsigned long __readgsdword(unsigned long);
-static __inline__
-unsigned __int64 __readgsqword(unsigned long);
-unsigned short __readgsword(unsigned long);
-unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
-                                unsigned __int64 _HighPart,
-                                unsigned char _Shift);
-unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
-                                 unsigned __int64 _HighPart,
-                                 unsigned char _Shift);
-static __inline__
-void __stosq(unsigned __int64 *, unsigned __int64, size_t);
-unsigned char __vmx_on(unsigned __int64 *);
-unsigned char __vmx_vmclear(unsigned __int64 *);
-unsigned char __vmx_vmlaunch(void);
-unsigned char __vmx_vmptrld(unsigned __int64 *);
-unsigned char __vmx_vmread(size_t, size_t *);
-unsigned char __vmx_vmresume(void);
-unsigned char __vmx_vmwrite(size_t, size_t);
-void __writegsbyte(unsigned long, unsigned char);
-void __writegsdword(unsigned long, unsigned long);
-void __writegsqword(unsigned long, unsigned __int64);
-void __writegsword(unsigned long, unsigned short);
-unsigned char _bittest64(__int64 const *, __int64);
-unsigned char _bittestandcomplement64(__int64 *, __int64);
-unsigned char _bittestandreset64(__int64 *, __int64);
-unsigned char _bittestandset64(__int64 *, __int64);
-long _InterlockedAnd_np(long volatile *_Value, long _Mask);
-short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
-unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
-unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
-long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
-                                    long _Comparand);
-unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
-                                             __int64 _ExchangeHigh,
-                                             __int64 _ExchangeLow,
-                                             __int64 *_ComparandResult);
-unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
-                                                __int64 _ExchangeHigh,
-                                                __int64 _ExchangeLow,
-                                                __int64 *_ComparandResult);
-short _InterlockedCompareExchange16_np(short volatile *_Destination,
-                                       short _Exchange, short _Comparand);
-__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
-                                         __int64 _Exchange, __int64 _Comparand);
-void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
-                                            void *_Exchange, void *_Comparand);
-long _InterlockedOr_np(long volatile *_Value, long _Mask);
-short _InterlockedOr16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedOr8_np(char volatile *_Value, char _Mask);
-long _InterlockedXor_np(long volatile *_Value, long _Mask);
-short _InterlockedXor16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedXor8_np(char volatile *_Value, char _Mask);
-unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
-__int64 _sarx_i64(__int64, unsigned int);
-unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
-unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
-static __inline__
-__int64 __mulh(__int64, __int64);
-static __inline__
-unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
-static __inline__
-__int64 _mul128(__int64, __int64, __int64*);
-static __inline__
-unsigned __int64 _umul128(unsigned __int64,
-                          unsigned __int64,
-                          unsigned __int64*);
-
-#endif /* __x86_64__ */
-
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-
-static __inline__
-unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
-static __inline__
-unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
-
-static __inline__
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
-static __inline__
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
-static __inline__
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
-static __inline__
-__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
-
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Add
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
-short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
-long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
-__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Increment
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedIncrement16_acq(short volatile *_Value);
-short _InterlockedIncrement16_nf(short volatile *_Value);
-short _InterlockedIncrement16_rel(short volatile *_Value);
-long _InterlockedIncrement_acq(long volatile *_Value);
-long _InterlockedIncrement_nf(long volatile *_Value);
-long _InterlockedIncrement_rel(long volatile *_Value);
-__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Decrement
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedDecrement16_acq(short volatile *_Value);
-short _InterlockedDecrement16_nf(short volatile *_Value);
-short _InterlockedDecrement16_rel(short volatile *_Value);
-long _InterlockedDecrement_acq(long volatile *_Value);
-long _InterlockedDecrement_nf(long volatile *_Value);
-long _InterlockedDecrement_rel(long volatile *_Value);
-__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked And
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
-short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
-long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
-long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
-long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Bit Counting and Testing
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
-                                            long _BitPos);
-unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
-                                           long _BitPos);
-unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
-                                            long _BitPos);
-unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
-                                              long _BitPos);
-unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
-                                             long _BitPos);
-unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
-                                              long _BitPos);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Or
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
-char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
-char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
-short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
-short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
-short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
-long _InterlockedOr_acq(long volatile *_Value, long _Mask);
-long _InterlockedOr_nf(long volatile *_Value, long _Mask);
-long _InterlockedOr_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Xor
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
-char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
-char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
-short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
-short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
-short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
-long _InterlockedXor_acq(long volatile *_Value, long _Mask);
-long _InterlockedXor_nf(long volatile *_Value, long _Mask);
-long _InterlockedXor_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
-char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
-char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
-short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
-short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
-short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
-long _InterlockedExchange_acq(long volatile *_Target, long _Value);
-long _InterlockedExchange_nf(long volatile *_Target, long _Value);
-long _InterlockedExchange_rel(long volatile *_Target, long _Value);
-__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedCompareExchange8_acq(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_nf(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_rel(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-short _InterlockedCompareExchange16_acq(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_nf(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_rel(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-long _InterlockedCompareExchange_acq(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_nf(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_rel(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* movs, stos
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsb" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsl" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsw" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
-  __asm__ __volatile__("rep stosl" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
-  __asm__ __volatile__("rep stosw" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-#endif
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsq" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
-  __asm__ __volatile__("rep stosq" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Misc
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__cpuid(int __info[4], int __level) {
-  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
-                   : "a"(__level), "c"(0));
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__cpuidex(int __info[4], int __level, int __ecx) {
-  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
-                   : "a"(__level), "c"(__ecx));
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__halt(void) {
-  __asm__ volatile ("hlt");
-}
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__nop(void) {
-  __asm__ volatile ("nop");
-}
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* MS AArch64 specific
-\*----------------------------------------------------------------------------*/
-#if defined(__aarch64__)
-unsigned __int64 __getReg(int);
-long _InterlockedAdd(long volatile *Addend, long Value);
-__int64 _ReadStatusReg(int);
-void _WriteStatusReg(int, __int64);
-
-unsigned short __cdecl _byteswap_ushort(unsigned short val);
-unsigned long __cdecl _byteswap_ulong (unsigned long val);
-unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Privileged intrinsics
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__readmsr(unsigned long __register) {
-  // Loads the contents of a 64-bit model specific register (MSR) specified in
-  // the ECX register into registers EDX:EAX. The EDX register is loaded with
-  // the high-order 32 bits of the MSR and the EAX register is loaded with the
-  // low-order 32 bits. If less than 64 bits are implemented in the MSR being
-  // read, the values returned to EDX:EAX in unimplemented bit locations are
-  // undefined.
-  unsigned long __edx;
-  unsigned long __eax;
-  __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
-  return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
-}
-#endif
-
-static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS
-__readcr3(void) {
-  unsigned __LPTRINT_TYPE__ __cr3_val;
-  __asm__ __volatile__ ("mov %%cr3, %0" : "=r"(__cr3_val) : : "memory");
-  return __cr3_val;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
-  __asm__ ("mov %0, %%cr3" : : "r"(__cr3_val) : "memory");
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#undef __LPTRINT_TYPE__
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __INTRIN_H */
-#endif /* _MSC_VER */
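Most of intrin.h is bare declarations that clang recognizes as builtins; a handful, such as __cpuid/__cpuidex above, carry inline definitions. A typical use, checking a CPUID feature bit (this header is only active in MSVC mode, e.g. clang-cl):

#ifdef _MSC_VER
#include <intrin.h>

static int has_sse42(void) {
  int info[4];
  __cpuid(info, 1);            /* leaf 1: processor feature flags */
  return (info[2] >> 20) & 1;  /* ECX bit 20 = SSE4.2 */
}
#endif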
diff --git a/linux-x86/lib64/clang/11.0.1/include/omp.h b/linux-x86/lib64/clang/11.0.1/include/omp.h
deleted file mode 100644
index 0581ab4..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/omp.h
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * include/omp.h.var
- */
-
-
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef __OMP_H
-#   define __OMP_H
-
-#   include <stdlib.h>
-#   include <stdint.h>
-
-#   define KMP_VERSION_MAJOR    5
-#   define KMP_VERSION_MINOR    0
-#   define KMP_VERSION_BUILD    20140926
-#   define KMP_BUILD_DATE       "No_Timestamp"
-
-#   ifdef __cplusplus
-    extern "C" {
-#   endif
-
-#   define omp_set_affinity_format   ompc_set_affinity_format
-#   define omp_get_affinity_format   ompc_get_affinity_format
-#   define omp_display_affinity      ompc_display_affinity
-#   define omp_capture_affinity      ompc_capture_affinity
-
-#   if defined(_WIN32)
-#       define __KAI_KMPC_CONVENTION __cdecl
-#       ifndef __KMP_IMP
-#           define __KMP_IMP __declspec(dllimport)
-#       endif
-#   else
-#       define __KAI_KMPC_CONVENTION
-#       ifndef __KMP_IMP
-#           define __KMP_IMP
-#       endif
-#   endif
-
-    /* schedule kind constants */
-    typedef enum omp_sched_t {
-        omp_sched_static  = 1,
-        omp_sched_dynamic = 2,
-        omp_sched_guided  = 3,
-        omp_sched_auto    = 4,
-        omp_sched_monotonic = 0x80000000
-    } omp_sched_t;
-
-    /* set API functions */
-    extern void   __KAI_KMPC_CONVENTION  omp_set_num_threads (int);
-    extern void   __KAI_KMPC_CONVENTION  omp_set_dynamic     (int);
-    extern void   __KAI_KMPC_CONVENTION  omp_set_nested      (int);
-    extern void   __KAI_KMPC_CONVENTION  omp_set_max_active_levels (int);
-    extern void   __KAI_KMPC_CONVENTION  omp_set_schedule          (omp_sched_t, int);
-
-    /* query API functions */
-    extern int    __KAI_KMPC_CONVENTION  omp_get_num_threads  (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_dynamic      (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_nested       (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_max_threads  (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_thread_num   (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_num_procs    (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_in_parallel      (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_in_final         (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_active_level        (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_level               (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_ancestor_thread_num (int);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_team_size           (int);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_thread_limit        (void);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_max_active_levels   (void);
-    extern void   __KAI_KMPC_CONVENTION  omp_get_schedule            (omp_sched_t *, int *);
-    extern int    __KAI_KMPC_CONVENTION  omp_get_max_task_priority   (void);
-
-    /* lock API functions */
-    typedef struct omp_lock_t {
-        void * _lk;
-    } omp_lock_t;
-
-    extern void   __KAI_KMPC_CONVENTION  omp_init_lock    (omp_lock_t *);
-    extern void   __KAI_KMPC_CONVENTION  omp_set_lock     (omp_lock_t *);
-    extern void   __KAI_KMPC_CONVENTION  omp_unset_lock   (omp_lock_t *);
-    extern void   __KAI_KMPC_CONVENTION  omp_destroy_lock (omp_lock_t *);
-    extern int    __KAI_KMPC_CONVENTION  omp_test_lock    (omp_lock_t *);
-
-    /* nested lock API functions */
-    typedef struct omp_nest_lock_t {
-        void * _lk;
-    } omp_nest_lock_t;
-
-    extern void   __KAI_KMPC_CONVENTION  omp_init_nest_lock    (omp_nest_lock_t *);
-    extern void   __KAI_KMPC_CONVENTION  omp_set_nest_lock     (omp_nest_lock_t *);
-    extern void   __KAI_KMPC_CONVENTION  omp_unset_nest_lock   (omp_nest_lock_t *);
-    extern void   __KAI_KMPC_CONVENTION  omp_destroy_nest_lock (omp_nest_lock_t *);
-    extern int    __KAI_KMPC_CONVENTION  omp_test_nest_lock    (omp_nest_lock_t *);
-
-    /* OpenMP 5.0 Synchronization hints */
-    typedef enum omp_sync_hint_t {
-        omp_sync_hint_none           = 0,
-        omp_lock_hint_none           = omp_sync_hint_none,
-        omp_sync_hint_uncontended    = 1,
-        omp_lock_hint_uncontended    = omp_sync_hint_uncontended,
-        omp_sync_hint_contended      = (1<<1),
-        omp_lock_hint_contended      = omp_sync_hint_contended,
-        omp_sync_hint_nonspeculative = (1<<2),
-        omp_lock_hint_nonspeculative = omp_sync_hint_nonspeculative,
-        omp_sync_hint_speculative    = (1<<3),
-        omp_lock_hint_speculative    = omp_sync_hint_speculative,
-        kmp_lock_hint_hle            = (1<<16),
-        kmp_lock_hint_rtm            = (1<<17),
-        kmp_lock_hint_adaptive       = (1<<18)
-    } omp_sync_hint_t;
-
-    /* lock hint type for dynamic user lock */
-    typedef omp_sync_hint_t omp_lock_hint_t;
-
-    /* hinted lock initializers */
-    extern void __KAI_KMPC_CONVENTION omp_init_lock_with_hint(omp_lock_t *, omp_lock_hint_t);
-    extern void __KAI_KMPC_CONVENTION omp_init_nest_lock_with_hint(omp_nest_lock_t *, omp_lock_hint_t);
-
-    /* time API functions */
-    extern double __KAI_KMPC_CONVENTION  omp_get_wtime (void);
-    extern double __KAI_KMPC_CONVENTION  omp_get_wtick (void);
-
-    /* OpenMP 4.0 */
-    extern int  __KAI_KMPC_CONVENTION  omp_get_default_device (void);
-    extern void __KAI_KMPC_CONVENTION  omp_set_default_device (int);
-    extern int  __KAI_KMPC_CONVENTION  omp_is_initial_device (void);
-    extern int  __KAI_KMPC_CONVENTION  omp_get_num_devices (void);
-    extern int  __KAI_KMPC_CONVENTION  omp_get_num_teams (void);
-    extern int  __KAI_KMPC_CONVENTION  omp_get_team_num (void);
-    extern int  __KAI_KMPC_CONVENTION  omp_get_cancellation (void);
-
-    /* OpenMP 4.5 */
-    extern int   __KAI_KMPC_CONVENTION  omp_get_initial_device (void);
-    extern void* __KAI_KMPC_CONVENTION  omp_target_alloc(size_t, int);
-    extern void  __KAI_KMPC_CONVENTION  omp_target_free(void *, int);
-    extern int   __KAI_KMPC_CONVENTION  omp_target_is_present(void *, int);
-    extern int   __KAI_KMPC_CONVENTION  omp_target_memcpy(void *, void *, size_t, size_t, size_t, int, int);
-    extern int   __KAI_KMPC_CONVENTION  omp_target_memcpy_rect(void *, void *, size_t, int, const size_t *,
-                                            const size_t *, const size_t *, const size_t *, const size_t *, int, int);
-    extern int   __KAI_KMPC_CONVENTION  omp_target_associate_ptr(void *, void *, size_t, size_t, int);
-    extern int   __KAI_KMPC_CONVENTION  omp_target_disassociate_ptr(void *, int);
-
-    /* OpenMP 5.0 */
-    extern int   __KAI_KMPC_CONVENTION  omp_get_device_num (void);
-    typedef void * omp_depend_t;
-
-    /* kmp API functions */
-    extern int    __KAI_KMPC_CONVENTION  kmp_get_stacksize          (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_stacksize          (int);
-    extern size_t __KAI_KMPC_CONVENTION  kmp_get_stacksize_s        (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_stacksize_s        (size_t);
-    extern int    __KAI_KMPC_CONVENTION  kmp_get_blocktime          (void);
-    extern int    __KAI_KMPC_CONVENTION  kmp_get_library            (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_blocktime          (int);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_library            (int);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_library_serial     (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_library_turnaround (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_library_throughput (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_defaults           (char const *);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_disp_num_buffers   (int);
-
-    /* Intel affinity API */
-    typedef void * kmp_affinity_mask_t;
-
-    extern int    __KAI_KMPC_CONVENTION  kmp_set_affinity             (kmp_affinity_mask_t *);
-    extern int    __KAI_KMPC_CONVENTION  kmp_get_affinity             (kmp_affinity_mask_t *);
-    extern int    __KAI_KMPC_CONVENTION  kmp_get_affinity_max_proc    (void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_create_affinity_mask     (kmp_affinity_mask_t *);
-    extern void   __KAI_KMPC_CONVENTION  kmp_destroy_affinity_mask    (kmp_affinity_mask_t *);
-    extern int    __KAI_KMPC_CONVENTION  kmp_set_affinity_mask_proc   (int, kmp_affinity_mask_t *);
-    extern int    __KAI_KMPC_CONVENTION  kmp_unset_affinity_mask_proc (int, kmp_affinity_mask_t *);
-    extern int    __KAI_KMPC_CONVENTION  kmp_get_affinity_mask_proc   (int, kmp_affinity_mask_t *);
-
-    /* OpenMP 4.0 affinity API */
-    typedef enum omp_proc_bind_t {
-        omp_proc_bind_false = 0,
-        omp_proc_bind_true = 1,
-        omp_proc_bind_master = 2,
-        omp_proc_bind_close = 3,
-        omp_proc_bind_spread = 4
-    } omp_proc_bind_t;
-
-    extern omp_proc_bind_t __KAI_KMPC_CONVENTION omp_get_proc_bind (void);
-
-    /* OpenMP 4.5 affinity API */
-    extern int  __KAI_KMPC_CONVENTION omp_get_num_places (void);
-    extern int  __KAI_KMPC_CONVENTION omp_get_place_num_procs (int);
-    extern void __KAI_KMPC_CONVENTION omp_get_place_proc_ids (int, int *);
-    extern int  __KAI_KMPC_CONVENTION omp_get_place_num (void);
-    extern int  __KAI_KMPC_CONVENTION omp_get_partition_num_places (void);
-    extern void __KAI_KMPC_CONVENTION omp_get_partition_place_nums (int *);
-
-    extern void * __KAI_KMPC_CONVENTION  kmp_malloc  (size_t);
-    extern void * __KAI_KMPC_CONVENTION  kmp_aligned_malloc  (size_t, size_t);
-    extern void * __KAI_KMPC_CONVENTION  kmp_calloc  (size_t, size_t);
-    extern void * __KAI_KMPC_CONVENTION  kmp_realloc (void *, size_t);
-    extern void   __KAI_KMPC_CONVENTION  kmp_free    (void *);
-
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_warnings_on(void);
-    extern void   __KAI_KMPC_CONVENTION  kmp_set_warnings_off(void);
-
-    /* OpenMP 5.0 Tool Control */
-    typedef enum omp_control_tool_result_t {
-        omp_control_tool_notool = -2,
-        omp_control_tool_nocallback = -1,
-        omp_control_tool_success = 0,
-        omp_control_tool_ignored = 1
-    } omp_control_tool_result_t;
-
-    typedef enum omp_control_tool_t {
-        omp_control_tool_start = 1,
-        omp_control_tool_pause = 2,
-        omp_control_tool_flush = 3,
-        omp_control_tool_end = 4
-    } omp_control_tool_t;
-
-    extern int __KAI_KMPC_CONVENTION omp_control_tool(int, int, void*);
-
-    /* OpenMP 5.0 Memory Management */
-    typedef uintptr_t omp_uintptr_t;
-
-    typedef enum {
-        omp_atk_threadmodel = 1,
-        omp_atk_alignment = 2,
-        omp_atk_access = 3,
-        omp_atk_pool_size = 4,
-        omp_atk_fallback = 5,
-        omp_atk_fb_data = 6,
-        omp_atk_pinned = 7,
-        omp_atk_partition = 8
-    } omp_alloctrait_key_t;
-
-    typedef enum {
-        omp_atv_false = 0,
-        omp_atv_true = 1,
-        omp_atv_default = 2,
-        omp_atv_contended = 3,
-        omp_atv_uncontended = 4,
-        omp_atv_sequential = 5,
-        omp_atv_private = 6,
-        omp_atv_all = 7,
-        omp_atv_thread = 8,
-        omp_atv_pteam = 9,
-        omp_atv_cgroup = 10,
-        omp_atv_default_mem_fb = 11,
-        omp_atv_null_fb = 12,
-        omp_atv_abort_fb = 13,
-        omp_atv_allocator_fb = 14,
-        omp_atv_environment = 15,
-        omp_atv_nearest = 16,
-        omp_atv_blocked = 17,
-        omp_atv_interleaved = 18
-    } omp_alloctrait_value_t;
-
-    typedef struct {
-        omp_alloctrait_key_t key;
-        omp_uintptr_t value;
-    } omp_alloctrait_t;
-
-#   if defined(_WIN32)
-    // On Windows, cl and icl do not support 64-bit enums, so use an integer instead.
-    typedef omp_uintptr_t omp_allocator_handle_t;
-    extern __KMP_IMP omp_allocator_handle_t const omp_null_allocator;
-    extern __KMP_IMP omp_allocator_handle_t const omp_default_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_large_cap_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_const_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_high_bw_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_low_lat_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_cgroup_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_pteam_mem_alloc;
-    extern __KMP_IMP omp_allocator_handle_t const omp_thread_mem_alloc;
-    typedef omp_uintptr_t omp_memspace_handle_t;
-    extern __KMP_IMP omp_memspace_handle_t const omp_default_mem_space;
-    extern __KMP_IMP omp_memspace_handle_t const omp_large_cap_mem_space;
-    extern __KMP_IMP omp_memspace_handle_t const omp_const_mem_space;
-    extern __KMP_IMP omp_memspace_handle_t const omp_high_bw_mem_space;
-    extern __KMP_IMP omp_memspace_handle_t const omp_low_lat_mem_space;
-#   else
-#       if __cplusplus >= 201103
-    typedef enum omp_allocator_handle_t : omp_uintptr_t
-#       else
-    typedef enum omp_allocator_handle_t
-#       endif
-    {
-      omp_null_allocator = 0,
-      omp_default_mem_alloc = 1,
-      omp_large_cap_mem_alloc = 2,
-      omp_const_mem_alloc = 3,
-      omp_high_bw_mem_alloc = 4,
-      omp_low_lat_mem_alloc = 5,
-      omp_cgroup_mem_alloc = 6,
-      omp_pteam_mem_alloc = 7,
-      omp_thread_mem_alloc = 8,
-      KMP_ALLOCATOR_MAX_HANDLE = UINTPTR_MAX
-    } omp_allocator_handle_t;
-#       if __cplusplus >= 201103
-    typedef enum omp_memspace_handle_t : omp_uintptr_t
-#       else
-    typedef enum omp_memspace_handle_t
-#       endif
-    {
-      omp_default_mem_space = 0,
-      omp_large_cap_mem_space = 1,
-      omp_const_mem_space = 2,
-      omp_high_bw_mem_space = 3,
-      omp_low_lat_mem_space = 4,
-      KMP_MEMSPACE_MAX_HANDLE = UINTPTR_MAX
-    } omp_memspace_handle_t;
-#   endif
-    extern omp_allocator_handle_t __KAI_KMPC_CONVENTION omp_init_allocator(omp_memspace_handle_t m,
-                                                       int ntraits, omp_alloctrait_t traits[]);
-    extern void __KAI_KMPC_CONVENTION omp_destroy_allocator(omp_allocator_handle_t allocator);
-
-    extern void __KAI_KMPC_CONVENTION omp_set_default_allocator(omp_allocator_handle_t a);
-    extern omp_allocator_handle_t __KAI_KMPC_CONVENTION omp_get_default_allocator(void);
-#   ifdef __cplusplus
-    extern void *__KAI_KMPC_CONVENTION omp_alloc(size_t size, omp_allocator_handle_t a = omp_null_allocator);
-    extern void __KAI_KMPC_CONVENTION omp_free(void * ptr, omp_allocator_handle_t a = omp_null_allocator);
-#   else
-    extern void *__KAI_KMPC_CONVENTION omp_alloc(size_t size, omp_allocator_handle_t a);
-    extern void __KAI_KMPC_CONVENTION omp_free(void *ptr, omp_allocator_handle_t a);
-#   endif
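The allocator API above (OpenMP 5.0 memory management) combines a memory space with a list of traits. A hedged sketch that requests 64-byte-aligned storage from the high-bandwidth space, falling back to the default allocator when it is unavailable:

#include <omp.h>

static void hbw_demo(void) {
  omp_alloctrait_t traits[] = {{omp_atk_alignment, 64},
                               {omp_atk_fallback, omp_atv_default_mem_fb}};
  omp_allocator_handle_t a =
      omp_init_allocator(omp_high_bw_mem_space, 2, traits);
  double *buf = (double *)omp_alloc(4096 * sizeof(double), a);
  /* ... use buf ... */
  omp_free(buf, a);
  omp_destroy_allocator(a);
}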
-
-    /* OpenMP 5.0 Affinity Format */
-    extern void __KAI_KMPC_CONVENTION omp_set_affinity_format(char const *);
-    extern size_t __KAI_KMPC_CONVENTION omp_get_affinity_format(char *, size_t);
-    extern void __KAI_KMPC_CONVENTION omp_display_affinity(char const *);
-    extern size_t __KAI_KMPC_CONVENTION omp_capture_affinity(char *, size_t, char const *);
-
-    /* OpenMP 5.0 events */
-#   if defined(_WIN32)
-    // On Windows cl and icl do not support 64-bit enum, let's use integer then.
-    typedef omp_uintptr_t omp_event_handle_t;
-#   else
-    typedef enum omp_event_handle_t { KMP_EVENT_MAX_HANDLE = UINTPTR_MAX } omp_event_handle_t;
-#   endif
-    extern void __KAI_KMPC_CONVENTION omp_fulfill_event ( omp_event_handle_t event );
-
-    /* OpenMP 5.0 Pause Resources */
-    typedef enum omp_pause_resource_t {
-      omp_pause_resume = 0,
-      omp_pause_soft = 1,
-      omp_pause_hard = 2
-    } omp_pause_resource_t;
-    extern int __KAI_KMPC_CONVENTION omp_pause_resource(omp_pause_resource_t, int);
-    extern int __KAI_KMPC_CONVENTION omp_pause_resource_all(omp_pause_resource_t);
-
-    extern int __KAI_KMPC_CONVENTION omp_get_supported_active_levels(void);
-
-#   undef __KAI_KMPC_CONVENTION
-#   undef __KMP_IMP
-
-    /* Warning:
-       The following typedefs are not standard, deprecated and will be removed in a future release.
-    */
-    typedef int     omp_int_t;
-    typedef double  omp_wtime_t;
-
-#   ifdef __cplusplus
-    }
-#   endif
-
-#endif /* __OMP_H */
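
The omp.h removed above declares the OpenMP 5.0 memory-allocator API: omp_init_allocator / omp_destroy_allocator over a memspace plus traits, and omp_alloc / omp_free against the resulting handle. A minimal usage sketch follows; it is illustrative only and not part of this patch, and it assumes the omp_atk_alignment trait key declared earlier in the same deleted header (outside the hunk shown here).

#include <omp.h>
#include <stddef.h>

/* Illustrative sketch only -- not part of the diff. Calls match the
   signatures declared in the removed omp.h above:
   omp_init_allocator(omp_memspace_handle_t, int, omp_alloctrait_t[]). */
int main(void) {
  omp_alloctrait_t traits[1] = {
    { omp_atk_alignment, 64 }          /* request 64-byte alignment */
  };
  omp_allocator_handle_t a =
      omp_init_allocator(omp_default_mem_space, 1, traits);
  double *buf = (double *)omp_alloc(1024 * sizeof(double), a);
  /* ... use buf ... */
  omp_free(buf, a);                    /* release through the same allocator */
  omp_destroy_allocator(a);
  return 0;
}
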
diff --git a/linux-x86/lib64/clang/11.0.1/include/opencl-c.h b/linux-x86/lib64/clang/11.0.1/include/opencl-c.h
deleted file mode 100644
index 3210f93..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/opencl-c.h
+++ /dev/null
@@ -1,16504 +0,0 @@
-//===--- opencl-c.h - OpenCL C language builtin function header -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _OPENCL_H_
-#define _OPENCL_H_
-
-#include "opencl-c-base.h"
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifndef cl_khr_depth_images
-#define cl_khr_depth_images
-#endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
-#ifdef cl_khr_3d_image_writes
-#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
-#endif //cl_khr_3d_image_writes
-#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
-#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-#define __ovld __attribute__((overloadable))
-#define __conv __attribute__((convergent))
-
-// Optimizations
-#define __purefn __attribute__((pure))
-#define __cnfn __attribute__((const))
-
-
-// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions
-
-char __ovld __cnfn convert_char_rte(char);
-char __ovld __cnfn convert_char_sat_rte(char);
-char __ovld __cnfn convert_char_rtz(char);
-char __ovld __cnfn convert_char_sat_rtz(char);
-char __ovld __cnfn convert_char_rtp(char);
-char __ovld __cnfn convert_char_sat_rtp(char);
-char __ovld __cnfn convert_char_rtn(char);
-char __ovld __cnfn convert_char_sat_rtn(char);
-char __ovld __cnfn convert_char(char);
-char __ovld __cnfn convert_char_sat(char);
-char __ovld __cnfn convert_char_rte(uchar);
-char __ovld __cnfn convert_char_sat_rte(uchar);
-char __ovld __cnfn convert_char_rtz(uchar);
-char __ovld __cnfn convert_char_sat_rtz(uchar);
-char __ovld __cnfn convert_char_rtp(uchar);
-char __ovld __cnfn convert_char_sat_rtp(uchar);
-char __ovld __cnfn convert_char_rtn(uchar);
-char __ovld __cnfn convert_char_sat_rtn(uchar);
-char __ovld __cnfn convert_char(uchar);
-char __ovld __cnfn convert_char_sat(uchar);
-char __ovld __cnfn convert_char_rte(short);
-char __ovld __cnfn convert_char_sat_rte(short);
-char __ovld __cnfn convert_char_rtz(short);
-char __ovld __cnfn convert_char_sat_rtz(short);
-char __ovld __cnfn convert_char_rtp(short);
-char __ovld __cnfn convert_char_sat_rtp(short);
-char __ovld __cnfn convert_char_rtn(short);
-char __ovld __cnfn convert_char_sat_rtn(short);
-char __ovld __cnfn convert_char(short);
-char __ovld __cnfn convert_char_sat(short);
-char __ovld __cnfn convert_char_rte(ushort);
-char __ovld __cnfn convert_char_sat_rte(ushort);
-char __ovld __cnfn convert_char_rtz(ushort);
-char __ovld __cnfn convert_char_sat_rtz(ushort);
-char __ovld __cnfn convert_char_rtp(ushort);
-char __ovld __cnfn convert_char_sat_rtp(ushort);
-char __ovld __cnfn convert_char_rtn(ushort);
-char __ovld __cnfn convert_char_sat_rtn(ushort);
-char __ovld __cnfn convert_char(ushort);
-char __ovld __cnfn convert_char_sat(ushort);
-char __ovld __cnfn convert_char_rte(int);
-char __ovld __cnfn convert_char_sat_rte(int);
-char __ovld __cnfn convert_char_rtz(int);
-char __ovld __cnfn convert_char_sat_rtz(int);
-char __ovld __cnfn convert_char_rtp(int);
-char __ovld __cnfn convert_char_sat_rtp(int);
-char __ovld __cnfn convert_char_rtn(int);
-char __ovld __cnfn convert_char_sat_rtn(int);
-char __ovld __cnfn convert_char(int);
-char __ovld __cnfn convert_char_sat(int);
-char __ovld __cnfn convert_char_rte(uint);
-char __ovld __cnfn convert_char_sat_rte(uint);
-char __ovld __cnfn convert_char_rtz(uint);
-char __ovld __cnfn convert_char_sat_rtz(uint);
-char __ovld __cnfn convert_char_rtp(uint);
-char __ovld __cnfn convert_char_sat_rtp(uint);
-char __ovld __cnfn convert_char_rtn(uint);
-char __ovld __cnfn convert_char_sat_rtn(uint);
-char __ovld __cnfn convert_char(uint);
-char __ovld __cnfn convert_char_sat(uint);
-char __ovld __cnfn convert_char_rte(long);
-char __ovld __cnfn convert_char_sat_rte(long);
-char __ovld __cnfn convert_char_rtz(long);
-char __ovld __cnfn convert_char_sat_rtz(long);
-char __ovld __cnfn convert_char_rtp(long);
-char __ovld __cnfn convert_char_sat_rtp(long);
-char __ovld __cnfn convert_char_rtn(long);
-char __ovld __cnfn convert_char_sat_rtn(long);
-char __ovld __cnfn convert_char(long);
-char __ovld __cnfn convert_char_sat(long);
-char __ovld __cnfn convert_char_rte(ulong);
-char __ovld __cnfn convert_char_sat_rte(ulong);
-char __ovld __cnfn convert_char_rtz(ulong);
-char __ovld __cnfn convert_char_sat_rtz(ulong);
-char __ovld __cnfn convert_char_rtp(ulong);
-char __ovld __cnfn convert_char_sat_rtp(ulong);
-char __ovld __cnfn convert_char_rtn(ulong);
-char __ovld __cnfn convert_char_sat_rtn(ulong);
-char __ovld __cnfn convert_char(ulong);
-char __ovld __cnfn convert_char_sat(ulong);
-char __ovld __cnfn convert_char_rte(float);
-char __ovld __cnfn convert_char_sat_rte(float);
-char __ovld __cnfn convert_char_rtz(float);
-char __ovld __cnfn convert_char_sat_rtz(float);
-char __ovld __cnfn convert_char_rtp(float);
-char __ovld __cnfn convert_char_sat_rtp(float);
-char __ovld __cnfn convert_char_rtn(float);
-char __ovld __cnfn convert_char_sat_rtn(float);
-char __ovld __cnfn convert_char(float);
-char __ovld __cnfn convert_char_sat(float);
-uchar __ovld __cnfn convert_uchar_rte(char);
-uchar __ovld __cnfn convert_uchar_sat_rte(char);
-uchar __ovld __cnfn convert_uchar_rtz(char);
-uchar __ovld __cnfn convert_uchar_sat_rtz(char);
-uchar __ovld __cnfn convert_uchar_rtp(char);
-uchar __ovld __cnfn convert_uchar_sat_rtp(char);
-uchar __ovld __cnfn convert_uchar_rtn(char);
-uchar __ovld __cnfn convert_uchar_sat_rtn(char);
-uchar __ovld __cnfn convert_uchar(char);
-uchar __ovld __cnfn convert_uchar_sat(char);
-uchar __ovld __cnfn convert_uchar_rte(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rte(uchar);
-uchar __ovld __cnfn convert_uchar_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uchar);
-uchar __ovld __cnfn convert_uchar_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uchar);
-uchar __ovld __cnfn convert_uchar_rtn(uchar);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uchar);
-uchar __ovld __cnfn convert_uchar(uchar);
-uchar __ovld __cnfn convert_uchar_sat(uchar);
-uchar __ovld __cnfn convert_uchar_rte(short);
-uchar __ovld __cnfn convert_uchar_sat_rte(short);
-uchar __ovld __cnfn convert_uchar_rtz(short);
-uchar __ovld __cnfn convert_uchar_sat_rtz(short);
-uchar __ovld __cnfn convert_uchar_rtp(short);
-uchar __ovld __cnfn convert_uchar_sat_rtp(short);
-uchar __ovld __cnfn convert_uchar_rtn(short);
-uchar __ovld __cnfn convert_uchar_sat_rtn(short);
-uchar __ovld __cnfn convert_uchar(short);
-uchar __ovld __cnfn convert_uchar_sat(short);
-uchar __ovld __cnfn convert_uchar_rte(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rte(ushort);
-uchar __ovld __cnfn convert_uchar_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ushort);
-uchar __ovld __cnfn convert_uchar_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ushort);
-uchar __ovld __cnfn convert_uchar_rtn(ushort);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ushort);
-uchar __ovld __cnfn convert_uchar(ushort);
-uchar __ovld __cnfn convert_uchar_sat(ushort);
-uchar __ovld __cnfn convert_uchar_rte(int);
-uchar __ovld __cnfn convert_uchar_sat_rte(int);
-uchar __ovld __cnfn convert_uchar_rtz(int);
-uchar __ovld __cnfn convert_uchar_sat_rtz(int);
-uchar __ovld __cnfn convert_uchar_rtp(int);
-uchar __ovld __cnfn convert_uchar_sat_rtp(int);
-uchar __ovld __cnfn convert_uchar_rtn(int);
-uchar __ovld __cnfn convert_uchar_sat_rtn(int);
-uchar __ovld __cnfn convert_uchar(int);
-uchar __ovld __cnfn convert_uchar_sat(int);
-uchar __ovld __cnfn convert_uchar_rte(uint);
-uchar __ovld __cnfn convert_uchar_sat_rte(uint);
-uchar __ovld __cnfn convert_uchar_rtz(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtz(uint);
-uchar __ovld __cnfn convert_uchar_rtp(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtp(uint);
-uchar __ovld __cnfn convert_uchar_rtn(uint);
-uchar __ovld __cnfn convert_uchar_sat_rtn(uint);
-uchar __ovld __cnfn convert_uchar(uint);
-uchar __ovld __cnfn convert_uchar_sat(uint);
-uchar __ovld __cnfn convert_uchar_rte(long);
-uchar __ovld __cnfn convert_uchar_sat_rte(long);
-uchar __ovld __cnfn convert_uchar_rtz(long);
-uchar __ovld __cnfn convert_uchar_sat_rtz(long);
-uchar __ovld __cnfn convert_uchar_rtp(long);
-uchar __ovld __cnfn convert_uchar_sat_rtp(long);
-uchar __ovld __cnfn convert_uchar_rtn(long);
-uchar __ovld __cnfn convert_uchar_sat_rtn(long);
-uchar __ovld __cnfn convert_uchar(long);
-uchar __ovld __cnfn convert_uchar_sat(long);
-uchar __ovld __cnfn convert_uchar_rte(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rte(ulong);
-uchar __ovld __cnfn convert_uchar_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtz(ulong);
-uchar __ovld __cnfn convert_uchar_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtp(ulong);
-uchar __ovld __cnfn convert_uchar_rtn(ulong);
-uchar __ovld __cnfn convert_uchar_sat_rtn(ulong);
-uchar __ovld __cnfn convert_uchar(ulong);
-uchar __ovld __cnfn convert_uchar_sat(ulong);
-uchar __ovld __cnfn convert_uchar_rte(float);
-uchar __ovld __cnfn convert_uchar_sat_rte(float);
-uchar __ovld __cnfn convert_uchar_rtz(float);
-uchar __ovld __cnfn convert_uchar_sat_rtz(float);
-uchar __ovld __cnfn convert_uchar_rtp(float);
-uchar __ovld __cnfn convert_uchar_sat_rtp(float);
-uchar __ovld __cnfn convert_uchar_rtn(float);
-uchar __ovld __cnfn convert_uchar_sat_rtn(float);
-uchar __ovld __cnfn convert_uchar(float);
-uchar __ovld __cnfn convert_uchar_sat(float);
-
-short __ovld __cnfn convert_short_rte(char);
-short __ovld __cnfn convert_short_sat_rte(char);
-short __ovld __cnfn convert_short_rtz(char);
-short __ovld __cnfn convert_short_sat_rtz(char);
-short __ovld __cnfn convert_short_rtp(char);
-short __ovld __cnfn convert_short_sat_rtp(char);
-short __ovld __cnfn convert_short_rtn(char);
-short __ovld __cnfn convert_short_sat_rtn(char);
-short __ovld __cnfn convert_short(char);
-short __ovld __cnfn convert_short_sat(char);
-short __ovld __cnfn convert_short_rte(uchar);
-short __ovld __cnfn convert_short_sat_rte(uchar);
-short __ovld __cnfn convert_short_rtz(uchar);
-short __ovld __cnfn convert_short_sat_rtz(uchar);
-short __ovld __cnfn convert_short_rtp(uchar);
-short __ovld __cnfn convert_short_sat_rtp(uchar);
-short __ovld __cnfn convert_short_rtn(uchar);
-short __ovld __cnfn convert_short_sat_rtn(uchar);
-short __ovld __cnfn convert_short(uchar);
-short __ovld __cnfn convert_short_sat(uchar);
-short __ovld __cnfn convert_short_rte(short);
-short __ovld __cnfn convert_short_sat_rte(short);
-short __ovld __cnfn convert_short_rtz(short);
-short __ovld __cnfn convert_short_sat_rtz(short);
-short __ovld __cnfn convert_short_rtp(short);
-short __ovld __cnfn convert_short_sat_rtp(short);
-short __ovld __cnfn convert_short_rtn(short);
-short __ovld __cnfn convert_short_sat_rtn(short);
-short __ovld __cnfn convert_short(short);
-short __ovld __cnfn convert_short_sat(short);
-short __ovld __cnfn convert_short_rte(ushort);
-short __ovld __cnfn convert_short_sat_rte(ushort);
-short __ovld __cnfn convert_short_rtz(ushort);
-short __ovld __cnfn convert_short_sat_rtz(ushort);
-short __ovld __cnfn convert_short_rtp(ushort);
-short __ovld __cnfn convert_short_sat_rtp(ushort);
-short __ovld __cnfn convert_short_rtn(ushort);
-short __ovld __cnfn convert_short_sat_rtn(ushort);
-short __ovld __cnfn convert_short(ushort);
-short __ovld __cnfn convert_short_sat(ushort);
-short __ovld __cnfn convert_short_rte(int);
-short __ovld __cnfn convert_short_sat_rte(int);
-short __ovld __cnfn convert_short_rtz(int);
-short __ovld __cnfn convert_short_sat_rtz(int);
-short __ovld __cnfn convert_short_rtp(int);
-short __ovld __cnfn convert_short_sat_rtp(int);
-short __ovld __cnfn convert_short_rtn(int);
-short __ovld __cnfn convert_short_sat_rtn(int);
-short __ovld __cnfn convert_short(int);
-short __ovld __cnfn convert_short_sat(int);
-short __ovld __cnfn convert_short_rte(uint);
-short __ovld __cnfn convert_short_sat_rte(uint);
-short __ovld __cnfn convert_short_rtz(uint);
-short __ovld __cnfn convert_short_sat_rtz(uint);
-short __ovld __cnfn convert_short_rtp(uint);
-short __ovld __cnfn convert_short_sat_rtp(uint);
-short __ovld __cnfn convert_short_rtn(uint);
-short __ovld __cnfn convert_short_sat_rtn(uint);
-short __ovld __cnfn convert_short(uint);
-short __ovld __cnfn convert_short_sat(uint);
-short __ovld __cnfn convert_short_rte(long);
-short __ovld __cnfn convert_short_sat_rte(long);
-short __ovld __cnfn convert_short_rtz(long);
-short __ovld __cnfn convert_short_sat_rtz(long);
-short __ovld __cnfn convert_short_rtp(long);
-short __ovld __cnfn convert_short_sat_rtp(long);
-short __ovld __cnfn convert_short_rtn(long);
-short __ovld __cnfn convert_short_sat_rtn(long);
-short __ovld __cnfn convert_short(long);
-short __ovld __cnfn convert_short_sat(long);
-short __ovld __cnfn convert_short_rte(ulong);
-short __ovld __cnfn convert_short_sat_rte(ulong);
-short __ovld __cnfn convert_short_rtz(ulong);
-short __ovld __cnfn convert_short_sat_rtz(ulong);
-short __ovld __cnfn convert_short_rtp(ulong);
-short __ovld __cnfn convert_short_sat_rtp(ulong);
-short __ovld __cnfn convert_short_rtn(ulong);
-short __ovld __cnfn convert_short_sat_rtn(ulong);
-short __ovld __cnfn convert_short(ulong);
-short __ovld __cnfn convert_short_sat(ulong);
-short __ovld __cnfn convert_short_rte(float);
-short __ovld __cnfn convert_short_sat_rte(float);
-short __ovld __cnfn convert_short_rtz(float);
-short __ovld __cnfn convert_short_sat_rtz(float);
-short __ovld __cnfn convert_short_rtp(float);
-short __ovld __cnfn convert_short_sat_rtp(float);
-short __ovld __cnfn convert_short_rtn(float);
-short __ovld __cnfn convert_short_sat_rtn(float);
-short __ovld __cnfn convert_short(float);
-short __ovld __cnfn convert_short_sat(float);
-ushort __ovld __cnfn convert_ushort_rte(char);
-ushort __ovld __cnfn convert_ushort_sat_rte(char);
-ushort __ovld __cnfn convert_ushort_rtz(char);
-ushort __ovld __cnfn convert_ushort_sat_rtz(char);
-ushort __ovld __cnfn convert_ushort_rtp(char);
-ushort __ovld __cnfn convert_ushort_sat_rtp(char);
-ushort __ovld __cnfn convert_ushort_rtn(char);
-ushort __ovld __cnfn convert_ushort_sat_rtn(char);
-ushort __ovld __cnfn convert_ushort(char);
-ushort __ovld __cnfn convert_ushort_sat(char);
-ushort __ovld __cnfn convert_ushort_rte(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rte(uchar);
-ushort __ovld __cnfn convert_ushort_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uchar);
-ushort __ovld __cnfn convert_ushort_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uchar);
-ushort __ovld __cnfn convert_ushort_rtn(uchar);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uchar);
-ushort __ovld __cnfn convert_ushort(uchar);
-ushort __ovld __cnfn convert_ushort_sat(uchar);
-ushort __ovld __cnfn convert_ushort_rte(short);
-ushort __ovld __cnfn convert_ushort_sat_rte(short);
-ushort __ovld __cnfn convert_ushort_rtz(short);
-ushort __ovld __cnfn convert_ushort_sat_rtz(short);
-ushort __ovld __cnfn convert_ushort_rtp(short);
-ushort __ovld __cnfn convert_ushort_sat_rtp(short);
-ushort __ovld __cnfn convert_ushort_rtn(short);
-ushort __ovld __cnfn convert_ushort_sat_rtn(short);
-ushort __ovld __cnfn convert_ushort(short);
-ushort __ovld __cnfn convert_ushort_sat(short);
-ushort __ovld __cnfn convert_ushort_rte(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rte(ushort);
-ushort __ovld __cnfn convert_ushort_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ushort);
-ushort __ovld __cnfn convert_ushort_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ushort);
-ushort __ovld __cnfn convert_ushort_rtn(ushort);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ushort);
-ushort __ovld __cnfn convert_ushort(ushort);
-ushort __ovld __cnfn convert_ushort_sat(ushort);
-ushort __ovld __cnfn convert_ushort_rte(int);
-ushort __ovld __cnfn convert_ushort_sat_rte(int);
-ushort __ovld __cnfn convert_ushort_rtz(int);
-ushort __ovld __cnfn convert_ushort_sat_rtz(int);
-ushort __ovld __cnfn convert_ushort_rtp(int);
-ushort __ovld __cnfn convert_ushort_sat_rtp(int);
-ushort __ovld __cnfn convert_ushort_rtn(int);
-ushort __ovld __cnfn convert_ushort_sat_rtn(int);
-ushort __ovld __cnfn convert_ushort(int);
-ushort __ovld __cnfn convert_ushort_sat(int);
-ushort __ovld __cnfn convert_ushort_rte(uint);
-ushort __ovld __cnfn convert_ushort_sat_rte(uint);
-ushort __ovld __cnfn convert_ushort_rtz(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtz(uint);
-ushort __ovld __cnfn convert_ushort_rtp(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtp(uint);
-ushort __ovld __cnfn convert_ushort_rtn(uint);
-ushort __ovld __cnfn convert_ushort_sat_rtn(uint);
-ushort __ovld __cnfn convert_ushort(uint);
-ushort __ovld __cnfn convert_ushort_sat(uint);
-ushort __ovld __cnfn convert_ushort_rte(long);
-ushort __ovld __cnfn convert_ushort_sat_rte(long);
-ushort __ovld __cnfn convert_ushort_rtz(long);
-ushort __ovld __cnfn convert_ushort_sat_rtz(long);
-ushort __ovld __cnfn convert_ushort_rtp(long);
-ushort __ovld __cnfn convert_ushort_sat_rtp(long);
-ushort __ovld __cnfn convert_ushort_rtn(long);
-ushort __ovld __cnfn convert_ushort_sat_rtn(long);
-ushort __ovld __cnfn convert_ushort(long);
-ushort __ovld __cnfn convert_ushort_sat(long);
-ushort __ovld __cnfn convert_ushort_rte(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rte(ulong);
-ushort __ovld __cnfn convert_ushort_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtz(ulong);
-ushort __ovld __cnfn convert_ushort_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtp(ulong);
-ushort __ovld __cnfn convert_ushort_rtn(ulong);
-ushort __ovld __cnfn convert_ushort_sat_rtn(ulong);
-ushort __ovld __cnfn convert_ushort(ulong);
-ushort __ovld __cnfn convert_ushort_sat(ulong);
-ushort __ovld __cnfn convert_ushort_rte(float);
-ushort __ovld __cnfn convert_ushort_sat_rte(float);
-ushort __ovld __cnfn convert_ushort_rtz(float);
-ushort __ovld __cnfn convert_ushort_sat_rtz(float);
-ushort __ovld __cnfn convert_ushort_rtp(float);
-ushort __ovld __cnfn convert_ushort_sat_rtp(float);
-ushort __ovld __cnfn convert_ushort_rtn(float);
-ushort __ovld __cnfn convert_ushort_sat_rtn(float);
-ushort __ovld __cnfn convert_ushort(float);
-ushort __ovld __cnfn convert_ushort_sat(float);
-int __ovld __cnfn convert_int_rte(char);
-int __ovld __cnfn convert_int_sat_rte(char);
-int __ovld __cnfn convert_int_rtz(char);
-int __ovld __cnfn convert_int_sat_rtz(char);
-int __ovld __cnfn convert_int_rtp(char);
-int __ovld __cnfn convert_int_sat_rtp(char);
-int __ovld __cnfn convert_int_rtn(char);
-int __ovld __cnfn convert_int_sat_rtn(char);
-int __ovld __cnfn convert_int(char);
-int __ovld __cnfn convert_int_sat(char);
-int __ovld __cnfn convert_int_rte(uchar);
-int __ovld __cnfn convert_int_sat_rte(uchar);
-int __ovld __cnfn convert_int_rtz(uchar);
-int __ovld __cnfn convert_int_sat_rtz(uchar);
-int __ovld __cnfn convert_int_rtp(uchar);
-int __ovld __cnfn convert_int_sat_rtp(uchar);
-int __ovld __cnfn convert_int_rtn(uchar);
-int __ovld __cnfn convert_int_sat_rtn(uchar);
-int __ovld __cnfn convert_int(uchar);
-int __ovld __cnfn convert_int_sat(uchar);
-int __ovld __cnfn convert_int_rte(short);
-int __ovld __cnfn convert_int_sat_rte(short);
-int __ovld __cnfn convert_int_rtz(short);
-int __ovld __cnfn convert_int_sat_rtz(short);
-int __ovld __cnfn convert_int_rtp(short);
-int __ovld __cnfn convert_int_sat_rtp(short);
-int __ovld __cnfn convert_int_rtn(short);
-int __ovld __cnfn convert_int_sat_rtn(short);
-int __ovld __cnfn convert_int(short);
-int __ovld __cnfn convert_int_sat(short);
-int __ovld __cnfn convert_int_rte(ushort);
-int __ovld __cnfn convert_int_sat_rte(ushort);
-int __ovld __cnfn convert_int_rtz(ushort);
-int __ovld __cnfn convert_int_sat_rtz(ushort);
-int __ovld __cnfn convert_int_rtp(ushort);
-int __ovld __cnfn convert_int_sat_rtp(ushort);
-int __ovld __cnfn convert_int_rtn(ushort);
-int __ovld __cnfn convert_int_sat_rtn(ushort);
-int __ovld __cnfn convert_int(ushort);
-int __ovld __cnfn convert_int_sat(ushort);
-int __ovld __cnfn convert_int_rte(int);
-int __ovld __cnfn convert_int_sat_rte(int);
-int __ovld __cnfn convert_int_rtz(int);
-int __ovld __cnfn convert_int_sat_rtz(int);
-int __ovld __cnfn convert_int_rtp(int);
-int __ovld __cnfn convert_int_sat_rtp(int);
-int __ovld __cnfn convert_int_rtn(int);
-int __ovld __cnfn convert_int_sat_rtn(int);
-int __ovld __cnfn convert_int(int);
-int __ovld __cnfn convert_int_sat(int);
-int __ovld __cnfn convert_int_rte(uint);
-int __ovld __cnfn convert_int_sat_rte(uint);
-int __ovld __cnfn convert_int_rtz(uint);
-int __ovld __cnfn convert_int_sat_rtz(uint);
-int __ovld __cnfn convert_int_rtp(uint);
-int __ovld __cnfn convert_int_sat_rtp(uint);
-int __ovld __cnfn convert_int_rtn(uint);
-int __ovld __cnfn convert_int_sat_rtn(uint);
-int __ovld __cnfn convert_int(uint);
-int __ovld __cnfn convert_int_sat(uint);
-int __ovld __cnfn convert_int_rte(long);
-int __ovld __cnfn convert_int_sat_rte(long);
-int __ovld __cnfn convert_int_rtz(long);
-int __ovld __cnfn convert_int_sat_rtz(long);
-int __ovld __cnfn convert_int_rtp(long);
-int __ovld __cnfn convert_int_sat_rtp(long);
-int __ovld __cnfn convert_int_rtn(long);
-int __ovld __cnfn convert_int_sat_rtn(long);
-int __ovld __cnfn convert_int(long);
-int __ovld __cnfn convert_int_sat(long);
-int __ovld __cnfn convert_int_rte(ulong);
-int __ovld __cnfn convert_int_sat_rte(ulong);
-int __ovld __cnfn convert_int_rtz(ulong);
-int __ovld __cnfn convert_int_sat_rtz(ulong);
-int __ovld __cnfn convert_int_rtp(ulong);
-int __ovld __cnfn convert_int_sat_rtp(ulong);
-int __ovld __cnfn convert_int_rtn(ulong);
-int __ovld __cnfn convert_int_sat_rtn(ulong);
-int __ovld __cnfn convert_int(ulong);
-int __ovld __cnfn convert_int_sat(ulong);
-int __ovld __cnfn convert_int_rte(float);
-int __ovld __cnfn convert_int_sat_rte(float);
-int __ovld __cnfn convert_int_rtz(float);
-int __ovld __cnfn convert_int_sat_rtz(float);
-int __ovld __cnfn convert_int_rtp(float);
-int __ovld __cnfn convert_int_sat_rtp(float);
-int __ovld __cnfn convert_int_rtn(float);
-int __ovld __cnfn convert_int_sat_rtn(float);
-int __ovld __cnfn convert_int(float);
-int __ovld __cnfn convert_int_sat(float);
-uint __ovld __cnfn convert_uint_rte(char);
-uint __ovld __cnfn convert_uint_sat_rte(char);
-uint __ovld __cnfn convert_uint_rtz(char);
-uint __ovld __cnfn convert_uint_sat_rtz(char);
-uint __ovld __cnfn convert_uint_rtp(char);
-uint __ovld __cnfn convert_uint_sat_rtp(char);
-uint __ovld __cnfn convert_uint_rtn(char);
-uint __ovld __cnfn convert_uint_sat_rtn(char);
-uint __ovld __cnfn convert_uint(char);
-uint __ovld __cnfn convert_uint_sat(char);
-uint __ovld __cnfn convert_uint_rte(uchar);
-uint __ovld __cnfn convert_uint_sat_rte(uchar);
-uint __ovld __cnfn convert_uint_rtz(uchar);
-uint __ovld __cnfn convert_uint_sat_rtz(uchar);
-uint __ovld __cnfn convert_uint_rtp(uchar);
-uint __ovld __cnfn convert_uint_sat_rtp(uchar);
-uint __ovld __cnfn convert_uint_rtn(uchar);
-uint __ovld __cnfn convert_uint_sat_rtn(uchar);
-uint __ovld __cnfn convert_uint(uchar);
-uint __ovld __cnfn convert_uint_sat(uchar);
-uint __ovld __cnfn convert_uint_rte(short);
-uint __ovld __cnfn convert_uint_sat_rte(short);
-uint __ovld __cnfn convert_uint_rtz(short);
-uint __ovld __cnfn convert_uint_sat_rtz(short);
-uint __ovld __cnfn convert_uint_rtp(short);
-uint __ovld __cnfn convert_uint_sat_rtp(short);
-uint __ovld __cnfn convert_uint_rtn(short);
-uint __ovld __cnfn convert_uint_sat_rtn(short);
-uint __ovld __cnfn convert_uint(short);
-uint __ovld __cnfn convert_uint_sat(short);
-uint __ovld __cnfn convert_uint_rte(ushort);
-uint __ovld __cnfn convert_uint_sat_rte(ushort);
-uint __ovld __cnfn convert_uint_rtz(ushort);
-uint __ovld __cnfn convert_uint_sat_rtz(ushort);
-uint __ovld __cnfn convert_uint_rtp(ushort);
-uint __ovld __cnfn convert_uint_sat_rtp(ushort);
-uint __ovld __cnfn convert_uint_rtn(ushort);
-uint __ovld __cnfn convert_uint_sat_rtn(ushort);
-uint __ovld __cnfn convert_uint(ushort);
-uint __ovld __cnfn convert_uint_sat(ushort);
-uint __ovld __cnfn convert_uint_rte(int);
-uint __ovld __cnfn convert_uint_sat_rte(int);
-uint __ovld __cnfn convert_uint_rtz(int);
-uint __ovld __cnfn convert_uint_sat_rtz(int);
-uint __ovld __cnfn convert_uint_rtp(int);
-uint __ovld __cnfn convert_uint_sat_rtp(int);
-uint __ovld __cnfn convert_uint_rtn(int);
-uint __ovld __cnfn convert_uint_sat_rtn(int);
-uint __ovld __cnfn convert_uint(int);
-uint __ovld __cnfn convert_uint_sat(int);
-uint __ovld __cnfn convert_uint_rte(uint);
-uint __ovld __cnfn convert_uint_sat_rte(uint);
-uint __ovld __cnfn convert_uint_rtz(uint);
-uint __ovld __cnfn convert_uint_sat_rtz(uint);
-uint __ovld __cnfn convert_uint_rtp(uint);
-uint __ovld __cnfn convert_uint_sat_rtp(uint);
-uint __ovld __cnfn convert_uint_rtn(uint);
-uint __ovld __cnfn convert_uint_sat_rtn(uint);
-uint __ovld __cnfn convert_uint(uint);
-uint __ovld __cnfn convert_uint_sat(uint);
-uint __ovld __cnfn convert_uint_rte(long);
-uint __ovld __cnfn convert_uint_sat_rte(long);
-uint __ovld __cnfn convert_uint_rtz(long);
-uint __ovld __cnfn convert_uint_sat_rtz(long);
-uint __ovld __cnfn convert_uint_rtp(long);
-uint __ovld __cnfn convert_uint_sat_rtp(long);
-uint __ovld __cnfn convert_uint_rtn(long);
-uint __ovld __cnfn convert_uint_sat_rtn(long);
-uint __ovld __cnfn convert_uint(long);
-uint __ovld __cnfn convert_uint_sat(long);
-uint __ovld __cnfn convert_uint_rte(ulong);
-uint __ovld __cnfn convert_uint_sat_rte(ulong);
-uint __ovld __cnfn convert_uint_rtz(ulong);
-uint __ovld __cnfn convert_uint_sat_rtz(ulong);
-uint __ovld __cnfn convert_uint_rtp(ulong);
-uint __ovld __cnfn convert_uint_sat_rtp(ulong);
-uint __ovld __cnfn convert_uint_rtn(ulong);
-uint __ovld __cnfn convert_uint_sat_rtn(ulong);
-uint __ovld __cnfn convert_uint(ulong);
-uint __ovld __cnfn convert_uint_sat(ulong);
-uint __ovld __cnfn convert_uint_rte(float);
-uint __ovld __cnfn convert_uint_sat_rte(float);
-uint __ovld __cnfn convert_uint_rtz(float);
-uint __ovld __cnfn convert_uint_sat_rtz(float);
-uint __ovld __cnfn convert_uint_rtp(float);
-uint __ovld __cnfn convert_uint_sat_rtp(float);
-uint __ovld __cnfn convert_uint_rtn(float);
-uint __ovld __cnfn convert_uint_sat_rtn(float);
-uint __ovld __cnfn convert_uint(float);
-uint __ovld __cnfn convert_uint_sat(float);
-long __ovld __cnfn convert_long_rte(char);
-long __ovld __cnfn convert_long_sat_rte(char);
-long __ovld __cnfn convert_long_rtz(char);
-long __ovld __cnfn convert_long_sat_rtz(char);
-long __ovld __cnfn convert_long_rtp(char);
-long __ovld __cnfn convert_long_sat_rtp(char);
-long __ovld __cnfn convert_long_rtn(char);
-long __ovld __cnfn convert_long_sat_rtn(char);
-long __ovld __cnfn convert_long(char);
-long __ovld __cnfn convert_long_sat(char);
-long __ovld __cnfn convert_long_rte(uchar);
-long __ovld __cnfn convert_long_sat_rte(uchar);
-long __ovld __cnfn convert_long_rtz(uchar);
-long __ovld __cnfn convert_long_sat_rtz(uchar);
-long __ovld __cnfn convert_long_rtp(uchar);
-long __ovld __cnfn convert_long_sat_rtp(uchar);
-long __ovld __cnfn convert_long_rtn(uchar);
-long __ovld __cnfn convert_long_sat_rtn(uchar);
-long __ovld __cnfn convert_long(uchar);
-long __ovld __cnfn convert_long_sat(uchar);
-long __ovld __cnfn convert_long_rte(short);
-long __ovld __cnfn convert_long_sat_rte(short);
-long __ovld __cnfn convert_long_rtz(short);
-long __ovld __cnfn convert_long_sat_rtz(short);
-long __ovld __cnfn convert_long_rtp(short);
-long __ovld __cnfn convert_long_sat_rtp(short);
-long __ovld __cnfn convert_long_rtn(short);
-long __ovld __cnfn convert_long_sat_rtn(short);
-long __ovld __cnfn convert_long(short);
-long __ovld __cnfn convert_long_sat(short);
-long __ovld __cnfn convert_long_rte(ushort);
-long __ovld __cnfn convert_long_sat_rte(ushort);
-long __ovld __cnfn convert_long_rtz(ushort);
-long __ovld __cnfn convert_long_sat_rtz(ushort);
-long __ovld __cnfn convert_long_rtp(ushort);
-long __ovld __cnfn convert_long_sat_rtp(ushort);
-long __ovld __cnfn convert_long_rtn(ushort);
-long __ovld __cnfn convert_long_sat_rtn(ushort);
-long __ovld __cnfn convert_long(ushort);
-long __ovld __cnfn convert_long_sat(ushort);
-long __ovld __cnfn convert_long_rte(int);
-long __ovld __cnfn convert_long_sat_rte(int);
-long __ovld __cnfn convert_long_rtz(int);
-long __ovld __cnfn convert_long_sat_rtz(int);
-long __ovld __cnfn convert_long_rtp(int);
-long __ovld __cnfn convert_long_sat_rtp(int);
-long __ovld __cnfn convert_long_rtn(int);
-long __ovld __cnfn convert_long_sat_rtn(int);
-long __ovld __cnfn convert_long(int);
-long __ovld __cnfn convert_long_sat(int);
-long __ovld __cnfn convert_long_rte(uint);
-long __ovld __cnfn convert_long_sat_rte(uint);
-long __ovld __cnfn convert_long_rtz(uint);
-long __ovld __cnfn convert_long_sat_rtz(uint);
-long __ovld __cnfn convert_long_rtp(uint);
-long __ovld __cnfn convert_long_sat_rtp(uint);
-long __ovld __cnfn convert_long_rtn(uint);
-long __ovld __cnfn convert_long_sat_rtn(uint);
-long __ovld __cnfn convert_long(uint);
-long __ovld __cnfn convert_long_sat(uint);
-long __ovld __cnfn convert_long_rte(long);
-long __ovld __cnfn convert_long_sat_rte(long);
-long __ovld __cnfn convert_long_rtz(long);
-long __ovld __cnfn convert_long_sat_rtz(long);
-long __ovld __cnfn convert_long_rtp(long);
-long __ovld __cnfn convert_long_sat_rtp(long);
-long __ovld __cnfn convert_long_rtn(long);
-long __ovld __cnfn convert_long_sat_rtn(long);
-long __ovld __cnfn convert_long(long);
-long __ovld __cnfn convert_long_sat(long);
-long __ovld __cnfn convert_long_rte(ulong);
-long __ovld __cnfn convert_long_sat_rte(ulong);
-long __ovld __cnfn convert_long_rtz(ulong);
-long __ovld __cnfn convert_long_sat_rtz(ulong);
-long __ovld __cnfn convert_long_rtp(ulong);
-long __ovld __cnfn convert_long_sat_rtp(ulong);
-long __ovld __cnfn convert_long_rtn(ulong);
-long __ovld __cnfn convert_long_sat_rtn(ulong);
-long __ovld __cnfn convert_long(ulong);
-long __ovld __cnfn convert_long_sat(ulong);
-long __ovld __cnfn convert_long_rte(float);
-long __ovld __cnfn convert_long_sat_rte(float);
-long __ovld __cnfn convert_long_rtz(float);
-long __ovld __cnfn convert_long_sat_rtz(float);
-long __ovld __cnfn convert_long_rtp(float);
-long __ovld __cnfn convert_long_sat_rtp(float);
-long __ovld __cnfn convert_long_rtn(float);
-long __ovld __cnfn convert_long_sat_rtn(float);
-long __ovld __cnfn convert_long(float);
-long __ovld __cnfn convert_long_sat(float);
-ulong __ovld __cnfn convert_ulong_rte(char);
-ulong __ovld __cnfn convert_ulong_sat_rte(char);
-ulong __ovld __cnfn convert_ulong_rtz(char);
-ulong __ovld __cnfn convert_ulong_sat_rtz(char);
-ulong __ovld __cnfn convert_ulong_rtp(char);
-ulong __ovld __cnfn convert_ulong_sat_rtp(char);
-ulong __ovld __cnfn convert_ulong_rtn(char);
-ulong __ovld __cnfn convert_ulong_sat_rtn(char);
-ulong __ovld __cnfn convert_ulong(char);
-ulong __ovld __cnfn convert_ulong_sat(char);
-ulong __ovld __cnfn convert_ulong_rte(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rte(uchar);
-ulong __ovld __cnfn convert_ulong_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uchar);
-ulong __ovld __cnfn convert_ulong_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uchar);
-ulong __ovld __cnfn convert_ulong_rtn(uchar);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uchar);
-ulong __ovld __cnfn convert_ulong(uchar);
-ulong __ovld __cnfn convert_ulong_sat(uchar);
-ulong __ovld __cnfn convert_ulong_rte(short);
-ulong __ovld __cnfn convert_ulong_sat_rte(short);
-ulong __ovld __cnfn convert_ulong_rtz(short);
-ulong __ovld __cnfn convert_ulong_sat_rtz(short);
-ulong __ovld __cnfn convert_ulong_rtp(short);
-ulong __ovld __cnfn convert_ulong_sat_rtp(short);
-ulong __ovld __cnfn convert_ulong_rtn(short);
-ulong __ovld __cnfn convert_ulong_sat_rtn(short);
-ulong __ovld __cnfn convert_ulong(short);
-ulong __ovld __cnfn convert_ulong_sat(short);
-ulong __ovld __cnfn convert_ulong_rte(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rte(ushort);
-ulong __ovld __cnfn convert_ulong_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ushort);
-ulong __ovld __cnfn convert_ulong_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ushort);
-ulong __ovld __cnfn convert_ulong_rtn(ushort);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ushort);
-ulong __ovld __cnfn convert_ulong(ushort);
-ulong __ovld __cnfn convert_ulong_sat(ushort);
-ulong __ovld __cnfn convert_ulong_rte(int);
-ulong __ovld __cnfn convert_ulong_sat_rte(int);
-ulong __ovld __cnfn convert_ulong_rtz(int);
-ulong __ovld __cnfn convert_ulong_sat_rtz(int);
-ulong __ovld __cnfn convert_ulong_rtp(int);
-ulong __ovld __cnfn convert_ulong_sat_rtp(int);
-ulong __ovld __cnfn convert_ulong_rtn(int);
-ulong __ovld __cnfn convert_ulong_sat_rtn(int);
-ulong __ovld __cnfn convert_ulong(int);
-ulong __ovld __cnfn convert_ulong_sat(int);
-ulong __ovld __cnfn convert_ulong_rte(uint);
-ulong __ovld __cnfn convert_ulong_sat_rte(uint);
-ulong __ovld __cnfn convert_ulong_rtz(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtz(uint);
-ulong __ovld __cnfn convert_ulong_rtp(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtp(uint);
-ulong __ovld __cnfn convert_ulong_rtn(uint);
-ulong __ovld __cnfn convert_ulong_sat_rtn(uint);
-ulong __ovld __cnfn convert_ulong(uint);
-ulong __ovld __cnfn convert_ulong_sat(uint);
-ulong __ovld __cnfn convert_ulong_rte(long);
-ulong __ovld __cnfn convert_ulong_sat_rte(long);
-ulong __ovld __cnfn convert_ulong_rtz(long);
-ulong __ovld __cnfn convert_ulong_sat_rtz(long);
-ulong __ovld __cnfn convert_ulong_rtp(long);
-ulong __ovld __cnfn convert_ulong_sat_rtp(long);
-ulong __ovld __cnfn convert_ulong_rtn(long);
-ulong __ovld __cnfn convert_ulong_sat_rtn(long);
-ulong __ovld __cnfn convert_ulong(long);
-ulong __ovld __cnfn convert_ulong_sat(long);
-ulong __ovld __cnfn convert_ulong_rte(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rte(ulong);
-ulong __ovld __cnfn convert_ulong_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtz(ulong);
-ulong __ovld __cnfn convert_ulong_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtp(ulong);
-ulong __ovld __cnfn convert_ulong_rtn(ulong);
-ulong __ovld __cnfn convert_ulong_sat_rtn(ulong);
-ulong __ovld __cnfn convert_ulong(ulong);
-ulong __ovld __cnfn convert_ulong_sat(ulong);
-ulong __ovld __cnfn convert_ulong_rte(float);
-ulong __ovld __cnfn convert_ulong_sat_rte(float);
-ulong __ovld __cnfn convert_ulong_rtz(float);
-ulong __ovld __cnfn convert_ulong_sat_rtz(float);
-ulong __ovld __cnfn convert_ulong_rtp(float);
-ulong __ovld __cnfn convert_ulong_sat_rtp(float);
-ulong __ovld __cnfn convert_ulong_rtn(float);
-ulong __ovld __cnfn convert_ulong_sat_rtn(float);
-ulong __ovld __cnfn convert_ulong(float);
-ulong __ovld __cnfn convert_ulong_sat(float);
-float __ovld __cnfn convert_float_rte(char);
-float __ovld __cnfn convert_float_rtz(char);
-float __ovld __cnfn convert_float_rtp(char);
-float __ovld __cnfn convert_float_rtn(char);
-float __ovld __cnfn convert_float(char);
-float __ovld __cnfn convert_float_rte(uchar);
-float __ovld __cnfn convert_float_rtz(uchar);
-float __ovld __cnfn convert_float_rtp(uchar);
-float __ovld __cnfn convert_float_rtn(uchar);
-float __ovld __cnfn convert_float(uchar);
-float __ovld __cnfn convert_float_rte(short);
-float __ovld __cnfn convert_float_rtz(short);
-float __ovld __cnfn convert_float_rtp(short);
-float __ovld __cnfn convert_float_rtn(short);
-float __ovld __cnfn convert_float(short);
-float __ovld __cnfn convert_float_rte(ushort);
-float __ovld __cnfn convert_float_rtz(ushort);
-float __ovld __cnfn convert_float_rtp(ushort);
-float __ovld __cnfn convert_float_rtn(ushort);
-float __ovld __cnfn convert_float(ushort);
-float __ovld __cnfn convert_float_rte(int);
-float __ovld __cnfn convert_float_rtz(int);
-float __ovld __cnfn convert_float_rtp(int);
-float __ovld __cnfn convert_float_rtn(int);
-float __ovld __cnfn convert_float(int);
-float __ovld __cnfn convert_float_rte(uint);
-float __ovld __cnfn convert_float_rtz(uint);
-float __ovld __cnfn convert_float_rtp(uint);
-float __ovld __cnfn convert_float_rtn(uint);
-float __ovld __cnfn convert_float(uint);
-float __ovld __cnfn convert_float_rte(long);
-float __ovld __cnfn convert_float_rtz(long);
-float __ovld __cnfn convert_float_rtp(long);
-float __ovld __cnfn convert_float_rtn(long);
-float __ovld __cnfn convert_float(long);
-float __ovld __cnfn convert_float_rte(ulong);
-float __ovld __cnfn convert_float_rtz(ulong);
-float __ovld __cnfn convert_float_rtp(ulong);
-float __ovld __cnfn convert_float_rtn(ulong);
-float __ovld __cnfn convert_float(ulong);
-float __ovld __cnfn convert_float_rte(float);
-float __ovld __cnfn convert_float_rtz(float);
-float __ovld __cnfn convert_float_rtp(float);
-float __ovld __cnfn convert_float_rtn(float);
-float __ovld __cnfn convert_float(float);
-char2 __ovld __cnfn convert_char2_rte(char2);
-char2 __ovld __cnfn convert_char2_sat_rte(char2);
-char2 __ovld __cnfn convert_char2_rtz(char2);
-char2 __ovld __cnfn convert_char2_sat_rtz(char2);
-char2 __ovld __cnfn convert_char2_rtp(char2);
-char2 __ovld __cnfn convert_char2_sat_rtp(char2);
-char2 __ovld __cnfn convert_char2_rtn(char2);
-char2 __ovld __cnfn convert_char2_sat_rtn(char2);
-char2 __ovld __cnfn convert_char2(char2);
-char2 __ovld __cnfn convert_char2_sat(char2);
-char2 __ovld __cnfn convert_char2_rte(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rte(uchar2);
-char2 __ovld __cnfn convert_char2_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uchar2);
-char2 __ovld __cnfn convert_char2_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uchar2);
-char2 __ovld __cnfn convert_char2_rtn(uchar2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uchar2);
-char2 __ovld __cnfn convert_char2(uchar2);
-char2 __ovld __cnfn convert_char2_sat(uchar2);
-char2 __ovld __cnfn convert_char2_rte(short2);
-char2 __ovld __cnfn convert_char2_sat_rte(short2);
-char2 __ovld __cnfn convert_char2_rtz(short2);
-char2 __ovld __cnfn convert_char2_sat_rtz(short2);
-char2 __ovld __cnfn convert_char2_rtp(short2);
-char2 __ovld __cnfn convert_char2_sat_rtp(short2);
-char2 __ovld __cnfn convert_char2_rtn(short2);
-char2 __ovld __cnfn convert_char2_sat_rtn(short2);
-char2 __ovld __cnfn convert_char2(short2);
-char2 __ovld __cnfn convert_char2_sat(short2);
-char2 __ovld __cnfn convert_char2_rte(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rte(ushort2);
-char2 __ovld __cnfn convert_char2_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ushort2);
-char2 __ovld __cnfn convert_char2_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ushort2);
-char2 __ovld __cnfn convert_char2_rtn(ushort2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ushort2);
-char2 __ovld __cnfn convert_char2(ushort2);
-char2 __ovld __cnfn convert_char2_sat(ushort2);
-char2 __ovld __cnfn convert_char2_rte(int2);
-char2 __ovld __cnfn convert_char2_sat_rte(int2);
-char2 __ovld __cnfn convert_char2_rtz(int2);
-char2 __ovld __cnfn convert_char2_sat_rtz(int2);
-char2 __ovld __cnfn convert_char2_rtp(int2);
-char2 __ovld __cnfn convert_char2_sat_rtp(int2);
-char2 __ovld __cnfn convert_char2_rtn(int2);
-char2 __ovld __cnfn convert_char2_sat_rtn(int2);
-char2 __ovld __cnfn convert_char2(int2);
-char2 __ovld __cnfn convert_char2_sat(int2);
-char2 __ovld __cnfn convert_char2_rte(uint2);
-char2 __ovld __cnfn convert_char2_sat_rte(uint2);
-char2 __ovld __cnfn convert_char2_rtz(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtz(uint2);
-char2 __ovld __cnfn convert_char2_rtp(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtp(uint2);
-char2 __ovld __cnfn convert_char2_rtn(uint2);
-char2 __ovld __cnfn convert_char2_sat_rtn(uint2);
-char2 __ovld __cnfn convert_char2(uint2);
-char2 __ovld __cnfn convert_char2_sat(uint2);
-char2 __ovld __cnfn convert_char2_rte(long2);
-char2 __ovld __cnfn convert_char2_sat_rte(long2);
-char2 __ovld __cnfn convert_char2_rtz(long2);
-char2 __ovld __cnfn convert_char2_sat_rtz(long2);
-char2 __ovld __cnfn convert_char2_rtp(long2);
-char2 __ovld __cnfn convert_char2_sat_rtp(long2);
-char2 __ovld __cnfn convert_char2_rtn(long2);
-char2 __ovld __cnfn convert_char2_sat_rtn(long2);
-char2 __ovld __cnfn convert_char2(long2);
-char2 __ovld __cnfn convert_char2_sat(long2);
-char2 __ovld __cnfn convert_char2_rte(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rte(ulong2);
-char2 __ovld __cnfn convert_char2_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtz(ulong2);
-char2 __ovld __cnfn convert_char2_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtp(ulong2);
-char2 __ovld __cnfn convert_char2_rtn(ulong2);
-char2 __ovld __cnfn convert_char2_sat_rtn(ulong2);
-char2 __ovld __cnfn convert_char2(ulong2);
-char2 __ovld __cnfn convert_char2_sat(ulong2);
-char2 __ovld __cnfn convert_char2_rte(float2);
-char2 __ovld __cnfn convert_char2_sat_rte(float2);
-char2 __ovld __cnfn convert_char2_rtz(float2);
-char2 __ovld __cnfn convert_char2_sat_rtz(float2);
-char2 __ovld __cnfn convert_char2_rtp(float2);
-char2 __ovld __cnfn convert_char2_sat_rtp(float2);
-char2 __ovld __cnfn convert_char2_rtn(float2);
-char2 __ovld __cnfn convert_char2_sat_rtn(float2);
-char2 __ovld __cnfn convert_char2(float2);
-char2 __ovld __cnfn convert_char2_sat(float2);
-uchar2 __ovld __cnfn convert_uchar2_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2);
-uchar2 __ovld __cnfn convert_uchar2(char2);
-uchar2 __ovld __cnfn convert_uchar2_sat(char2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2);
-uchar2 __ovld __cnfn convert_uchar2(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uchar2);
-uchar2 __ovld __cnfn convert_uchar2_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2);
-uchar2 __ovld __cnfn convert_uchar2(short2);
-uchar2 __ovld __cnfn convert_uchar2_sat(short2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2);
-uchar2 __ovld __cnfn convert_uchar2(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ushort2);
-uchar2 __ovld __cnfn convert_uchar2_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2);
-uchar2 __ovld __cnfn convert_uchar2(int2);
-uchar2 __ovld __cnfn convert_uchar2_sat(int2);
-uchar2 __ovld __cnfn convert_uchar2_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2);
-uchar2 __ovld __cnfn convert_uchar2(uint2);
-uchar2 __ovld __cnfn convert_uchar2_sat(uint2);
-uchar2 __ovld __cnfn convert_uchar2_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2);
-uchar2 __ovld __cnfn convert_uchar2(long2);
-uchar2 __ovld __cnfn convert_uchar2_sat(long2);
-uchar2 __ovld __cnfn convert_uchar2_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2);
-uchar2 __ovld __cnfn convert_uchar2(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_sat(ulong2);
-uchar2 __ovld __cnfn convert_uchar2_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2);
-uchar2 __ovld __cnfn convert_uchar2(float2);
-uchar2 __ovld __cnfn convert_uchar2_sat(float2);
-short2 __ovld __cnfn convert_short2_rte(char2);
-short2 __ovld __cnfn convert_short2_sat_rte(char2);
-short2 __ovld __cnfn convert_short2_rtz(char2);
-short2 __ovld __cnfn convert_short2_sat_rtz(char2);
-short2 __ovld __cnfn convert_short2_rtp(char2);
-short2 __ovld __cnfn convert_short2_sat_rtp(char2);
-short2 __ovld __cnfn convert_short2_rtn(char2);
-short2 __ovld __cnfn convert_short2_sat_rtn(char2);
-short2 __ovld __cnfn convert_short2(char2);
-short2 __ovld __cnfn convert_short2_sat(char2);
-short2 __ovld __cnfn convert_short2_rte(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rte(uchar2);
-short2 __ovld __cnfn convert_short2_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uchar2);
-short2 __ovld __cnfn convert_short2_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uchar2);
-short2 __ovld __cnfn convert_short2_rtn(uchar2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uchar2);
-short2 __ovld __cnfn convert_short2(uchar2);
-short2 __ovld __cnfn convert_short2_sat(uchar2);
-short2 __ovld __cnfn convert_short2_rte(short2);
-short2 __ovld __cnfn convert_short2_sat_rte(short2);
-short2 __ovld __cnfn convert_short2_rtz(short2);
-short2 __ovld __cnfn convert_short2_sat_rtz(short2);
-short2 __ovld __cnfn convert_short2_rtp(short2);
-short2 __ovld __cnfn convert_short2_sat_rtp(short2);
-short2 __ovld __cnfn convert_short2_rtn(short2);
-short2 __ovld __cnfn convert_short2_sat_rtn(short2);
-short2 __ovld __cnfn convert_short2(short2);
-short2 __ovld __cnfn convert_short2_sat(short2);
-short2 __ovld __cnfn convert_short2_rte(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rte(ushort2);
-short2 __ovld __cnfn convert_short2_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ushort2);
-short2 __ovld __cnfn convert_short2_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ushort2);
-short2 __ovld __cnfn convert_short2_rtn(ushort2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ushort2);
-short2 __ovld __cnfn convert_short2(ushort2);
-short2 __ovld __cnfn convert_short2_sat(ushort2);
-short2 __ovld __cnfn convert_short2_rte(int2);
-short2 __ovld __cnfn convert_short2_sat_rte(int2);
-short2 __ovld __cnfn convert_short2_rtz(int2);
-short2 __ovld __cnfn convert_short2_sat_rtz(int2);
-short2 __ovld __cnfn convert_short2_rtp(int2);
-short2 __ovld __cnfn convert_short2_sat_rtp(int2);
-short2 __ovld __cnfn convert_short2_rtn(int2);
-short2 __ovld __cnfn convert_short2_sat_rtn(int2);
-short2 __ovld __cnfn convert_short2(int2);
-short2 __ovld __cnfn convert_short2_sat(int2);
-short2 __ovld __cnfn convert_short2_rte(uint2);
-short2 __ovld __cnfn convert_short2_sat_rte(uint2);
-short2 __ovld __cnfn convert_short2_rtz(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtz(uint2);
-short2 __ovld __cnfn convert_short2_rtp(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtp(uint2);
-short2 __ovld __cnfn convert_short2_rtn(uint2);
-short2 __ovld __cnfn convert_short2_sat_rtn(uint2);
-short2 __ovld __cnfn convert_short2(uint2);
-short2 __ovld __cnfn convert_short2_sat(uint2);
-short2 __ovld __cnfn convert_short2_rte(long2);
-short2 __ovld __cnfn convert_short2_sat_rte(long2);
-short2 __ovld __cnfn convert_short2_rtz(long2);
-short2 __ovld __cnfn convert_short2_sat_rtz(long2);
-short2 __ovld __cnfn convert_short2_rtp(long2);
-short2 __ovld __cnfn convert_short2_sat_rtp(long2);
-short2 __ovld __cnfn convert_short2_rtn(long2);
-short2 __ovld __cnfn convert_short2_sat_rtn(long2);
-short2 __ovld __cnfn convert_short2(long2);
-short2 __ovld __cnfn convert_short2_sat(long2);
-short2 __ovld __cnfn convert_short2_rte(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rte(ulong2);
-short2 __ovld __cnfn convert_short2_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtz(ulong2);
-short2 __ovld __cnfn convert_short2_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtp(ulong2);
-short2 __ovld __cnfn convert_short2_rtn(ulong2);
-short2 __ovld __cnfn convert_short2_sat_rtn(ulong2);
-short2 __ovld __cnfn convert_short2(ulong2);
-short2 __ovld __cnfn convert_short2_sat(ulong2);
-short2 __ovld __cnfn convert_short2_rte(float2);
-short2 __ovld __cnfn convert_short2_sat_rte(float2);
-short2 __ovld __cnfn convert_short2_rtz(float2);
-short2 __ovld __cnfn convert_short2_sat_rtz(float2);
-short2 __ovld __cnfn convert_short2_rtp(float2);
-short2 __ovld __cnfn convert_short2_sat_rtp(float2);
-short2 __ovld __cnfn convert_short2_rtn(float2);
-short2 __ovld __cnfn convert_short2_sat_rtn(float2);
-short2 __ovld __cnfn convert_short2(float2);
-short2 __ovld __cnfn convert_short2_sat(float2);
-ushort2 __ovld __cnfn convert_ushort2_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2);
-ushort2 __ovld __cnfn convert_ushort2(char2);
-ushort2 __ovld __cnfn convert_ushort2_sat(char2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2);
-ushort2 __ovld __cnfn convert_ushort2(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uchar2);
-ushort2 __ovld __cnfn convert_ushort2_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2);
-ushort2 __ovld __cnfn convert_ushort2(short2);
-ushort2 __ovld __cnfn convert_ushort2_sat(short2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2);
-ushort2 __ovld __cnfn convert_ushort2(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ushort2);
-ushort2 __ovld __cnfn convert_ushort2_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2);
-ushort2 __ovld __cnfn convert_ushort2(int2);
-ushort2 __ovld __cnfn convert_ushort2_sat(int2);
-ushort2 __ovld __cnfn convert_ushort2_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2);
-ushort2 __ovld __cnfn convert_ushort2(uint2);
-ushort2 __ovld __cnfn convert_ushort2_sat(uint2);
-ushort2 __ovld __cnfn convert_ushort2_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2);
-ushort2 __ovld __cnfn convert_ushort2(long2);
-ushort2 __ovld __cnfn convert_ushort2_sat(long2);
-ushort2 __ovld __cnfn convert_ushort2_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2);
-ushort2 __ovld __cnfn convert_ushort2(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_sat(ulong2);
-ushort2 __ovld __cnfn convert_ushort2_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2);
-ushort2 __ovld __cnfn convert_ushort2(float2);
-ushort2 __ovld __cnfn convert_ushort2_sat(float2);
-int2 __ovld __cnfn convert_int2_rte(char2);
-int2 __ovld __cnfn convert_int2_sat_rte(char2);
-int2 __ovld __cnfn convert_int2_rtz(char2);
-int2 __ovld __cnfn convert_int2_sat_rtz(char2);
-int2 __ovld __cnfn convert_int2_rtp(char2);
-int2 __ovld __cnfn convert_int2_sat_rtp(char2);
-int2 __ovld __cnfn convert_int2_rtn(char2);
-int2 __ovld __cnfn convert_int2_sat_rtn(char2);
-int2 __ovld __cnfn convert_int2(char2);
-int2 __ovld __cnfn convert_int2_sat(char2);
-int2 __ovld __cnfn convert_int2_rte(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rte(uchar2);
-int2 __ovld __cnfn convert_int2_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uchar2);
-int2 __ovld __cnfn convert_int2_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uchar2);
-int2 __ovld __cnfn convert_int2_rtn(uchar2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uchar2);
-int2 __ovld __cnfn convert_int2(uchar2);
-int2 __ovld __cnfn convert_int2_sat(uchar2);
-int2 __ovld __cnfn convert_int2_rte(short2);
-int2 __ovld __cnfn convert_int2_sat_rte(short2);
-int2 __ovld __cnfn convert_int2_rtz(short2);
-int2 __ovld __cnfn convert_int2_sat_rtz(short2);
-int2 __ovld __cnfn convert_int2_rtp(short2);
-int2 __ovld __cnfn convert_int2_sat_rtp(short2);
-int2 __ovld __cnfn convert_int2_rtn(short2);
-int2 __ovld __cnfn convert_int2_sat_rtn(short2);
-int2 __ovld __cnfn convert_int2(short2);
-int2 __ovld __cnfn convert_int2_sat(short2);
-int2 __ovld __cnfn convert_int2_rte(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rte(ushort2);
-int2 __ovld __cnfn convert_int2_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ushort2);
-int2 __ovld __cnfn convert_int2_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ushort2);
-int2 __ovld __cnfn convert_int2_rtn(ushort2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ushort2);
-int2 __ovld __cnfn convert_int2(ushort2);
-int2 __ovld __cnfn convert_int2_sat(ushort2);
-int2 __ovld __cnfn convert_int2_rte(int2);
-int2 __ovld __cnfn convert_int2_sat_rte(int2);
-int2 __ovld __cnfn convert_int2_rtz(int2);
-int2 __ovld __cnfn convert_int2_sat_rtz(int2);
-int2 __ovld __cnfn convert_int2_rtp(int2);
-int2 __ovld __cnfn convert_int2_sat_rtp(int2);
-int2 __ovld __cnfn convert_int2_rtn(int2);
-int2 __ovld __cnfn convert_int2_sat_rtn(int2);
-int2 __ovld __cnfn convert_int2(int2);
-int2 __ovld __cnfn convert_int2_sat(int2);
-int2 __ovld __cnfn convert_int2_rte(uint2);
-int2 __ovld __cnfn convert_int2_sat_rte(uint2);
-int2 __ovld __cnfn convert_int2_rtz(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtz(uint2);
-int2 __ovld __cnfn convert_int2_rtp(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtp(uint2);
-int2 __ovld __cnfn convert_int2_rtn(uint2);
-int2 __ovld __cnfn convert_int2_sat_rtn(uint2);
-int2 __ovld __cnfn convert_int2(uint2);
-int2 __ovld __cnfn convert_int2_sat(uint2);
-int2 __ovld __cnfn convert_int2_rte(long2);
-int2 __ovld __cnfn convert_int2_sat_rte(long2);
-int2 __ovld __cnfn convert_int2_rtz(long2);
-int2 __ovld __cnfn convert_int2_sat_rtz(long2);
-int2 __ovld __cnfn convert_int2_rtp(long2);
-int2 __ovld __cnfn convert_int2_sat_rtp(long2);
-int2 __ovld __cnfn convert_int2_rtn(long2);
-int2 __ovld __cnfn convert_int2_sat_rtn(long2);
-int2 __ovld __cnfn convert_int2(long2);
-int2 __ovld __cnfn convert_int2_sat(long2);
-int2 __ovld __cnfn convert_int2_rte(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rte(ulong2);
-int2 __ovld __cnfn convert_int2_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtz(ulong2);
-int2 __ovld __cnfn convert_int2_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtp(ulong2);
-int2 __ovld __cnfn convert_int2_rtn(ulong2);
-int2 __ovld __cnfn convert_int2_sat_rtn(ulong2);
-int2 __ovld __cnfn convert_int2(ulong2);
-int2 __ovld __cnfn convert_int2_sat(ulong2);
-int2 __ovld __cnfn convert_int2_rte(float2);
-int2 __ovld __cnfn convert_int2_sat_rte(float2);
-int2 __ovld __cnfn convert_int2_rtz(float2);
-int2 __ovld __cnfn convert_int2_sat_rtz(float2);
-int2 __ovld __cnfn convert_int2_rtp(float2);
-int2 __ovld __cnfn convert_int2_sat_rtp(float2);
-int2 __ovld __cnfn convert_int2_rtn(float2);
-int2 __ovld __cnfn convert_int2_sat_rtn(float2);
-int2 __ovld __cnfn convert_int2(float2);
-int2 __ovld __cnfn convert_int2_sat(float2);
-uint2 __ovld __cnfn convert_uint2_rte(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(char2);
-uint2 __ovld __cnfn convert_uint2_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(char2);
-uint2 __ovld __cnfn convert_uint2_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(char2);
-uint2 __ovld __cnfn convert_uint2_rtn(char2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(char2);
-uint2 __ovld __cnfn convert_uint2(char2);
-uint2 __ovld __cnfn convert_uint2_sat(char2);
-uint2 __ovld __cnfn convert_uint2_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2);
-uint2 __ovld __cnfn convert_uint2_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2);
-uint2 __ovld __cnfn convert_uint2(uchar2);
-uint2 __ovld __cnfn convert_uint2_sat(uchar2);
-uint2 __ovld __cnfn convert_uint2_rte(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(short2);
-uint2 __ovld __cnfn convert_uint2_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(short2);
-uint2 __ovld __cnfn convert_uint2_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(short2);
-uint2 __ovld __cnfn convert_uint2_rtn(short2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(short2);
-uint2 __ovld __cnfn convert_uint2(short2);
-uint2 __ovld __cnfn convert_uint2_sat(short2);
-uint2 __ovld __cnfn convert_uint2_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2);
-uint2 __ovld __cnfn convert_uint2_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2);
-uint2 __ovld __cnfn convert_uint2(ushort2);
-uint2 __ovld __cnfn convert_uint2_sat(ushort2);
-uint2 __ovld __cnfn convert_uint2_rte(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(int2);
-uint2 __ovld __cnfn convert_uint2_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(int2);
-uint2 __ovld __cnfn convert_uint2_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(int2);
-uint2 __ovld __cnfn convert_uint2_rtn(int2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(int2);
-uint2 __ovld __cnfn convert_uint2(int2);
-uint2 __ovld __cnfn convert_uint2_sat(int2);
-uint2 __ovld __cnfn convert_uint2_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(uint2);
-uint2 __ovld __cnfn convert_uint2_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2);
-uint2 __ovld __cnfn convert_uint2_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2);
-uint2 __ovld __cnfn convert_uint2_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2);
-uint2 __ovld __cnfn convert_uint2(uint2);
-uint2 __ovld __cnfn convert_uint2_sat(uint2);
-uint2 __ovld __cnfn convert_uint2_rte(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(long2);
-uint2 __ovld __cnfn convert_uint2_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(long2);
-uint2 __ovld __cnfn convert_uint2_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(long2);
-uint2 __ovld __cnfn convert_uint2_rtn(long2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(long2);
-uint2 __ovld __cnfn convert_uint2(long2);
-uint2 __ovld __cnfn convert_uint2_sat(long2);
-uint2 __ovld __cnfn convert_uint2_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2);
-uint2 __ovld __cnfn convert_uint2_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2);
-uint2 __ovld __cnfn convert_uint2(ulong2);
-uint2 __ovld __cnfn convert_uint2_sat(ulong2);
-uint2 __ovld __cnfn convert_uint2_rte(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(float2);
-uint2 __ovld __cnfn convert_uint2_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(float2);
-uint2 __ovld __cnfn convert_uint2_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(float2);
-uint2 __ovld __cnfn convert_uint2_rtn(float2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(float2);
-uint2 __ovld __cnfn convert_uint2(float2);
-uint2 __ovld __cnfn convert_uint2_sat(float2);
-long2 __ovld __cnfn convert_long2_rte(char2);
-long2 __ovld __cnfn convert_long2_sat_rte(char2);
-long2 __ovld __cnfn convert_long2_rtz(char2);
-long2 __ovld __cnfn convert_long2_sat_rtz(char2);
-long2 __ovld __cnfn convert_long2_rtp(char2);
-long2 __ovld __cnfn convert_long2_sat_rtp(char2);
-long2 __ovld __cnfn convert_long2_rtn(char2);
-long2 __ovld __cnfn convert_long2_sat_rtn(char2);
-long2 __ovld __cnfn convert_long2(char2);
-long2 __ovld __cnfn convert_long2_sat(char2);
-long2 __ovld __cnfn convert_long2_rte(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rte(uchar2);
-long2 __ovld __cnfn convert_long2_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uchar2);
-long2 __ovld __cnfn convert_long2_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uchar2);
-long2 __ovld __cnfn convert_long2_rtn(uchar2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uchar2);
-long2 __ovld __cnfn convert_long2(uchar2);
-long2 __ovld __cnfn convert_long2_sat(uchar2);
-long2 __ovld __cnfn convert_long2_rte(short2);
-long2 __ovld __cnfn convert_long2_sat_rte(short2);
-long2 __ovld __cnfn convert_long2_rtz(short2);
-long2 __ovld __cnfn convert_long2_sat_rtz(short2);
-long2 __ovld __cnfn convert_long2_rtp(short2);
-long2 __ovld __cnfn convert_long2_sat_rtp(short2);
-long2 __ovld __cnfn convert_long2_rtn(short2);
-long2 __ovld __cnfn convert_long2_sat_rtn(short2);
-long2 __ovld __cnfn convert_long2(short2);
-long2 __ovld __cnfn convert_long2_sat(short2);
-long2 __ovld __cnfn convert_long2_rte(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rte(ushort2);
-long2 __ovld __cnfn convert_long2_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ushort2);
-long2 __ovld __cnfn convert_long2_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ushort2);
-long2 __ovld __cnfn convert_long2_rtn(ushort2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ushort2);
-long2 __ovld __cnfn convert_long2(ushort2);
-long2 __ovld __cnfn convert_long2_sat(ushort2);
-long2 __ovld __cnfn convert_long2_rte(int2);
-long2 __ovld __cnfn convert_long2_sat_rte(int2);
-long2 __ovld __cnfn convert_long2_rtz(int2);
-long2 __ovld __cnfn convert_long2_sat_rtz(int2);
-long2 __ovld __cnfn convert_long2_rtp(int2);
-long2 __ovld __cnfn convert_long2_sat_rtp(int2);
-long2 __ovld __cnfn convert_long2_rtn(int2);
-long2 __ovld __cnfn convert_long2_sat_rtn(int2);
-long2 __ovld __cnfn convert_long2(int2);
-long2 __ovld __cnfn convert_long2_sat(int2);
-long2 __ovld __cnfn convert_long2_rte(uint2);
-long2 __ovld __cnfn convert_long2_sat_rte(uint2);
-long2 __ovld __cnfn convert_long2_rtz(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtz(uint2);
-long2 __ovld __cnfn convert_long2_rtp(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtp(uint2);
-long2 __ovld __cnfn convert_long2_rtn(uint2);
-long2 __ovld __cnfn convert_long2_sat_rtn(uint2);
-long2 __ovld __cnfn convert_long2(uint2);
-long2 __ovld __cnfn convert_long2_sat(uint2);
-long2 __ovld __cnfn convert_long2_rte(long2);
-long2 __ovld __cnfn convert_long2_sat_rte(long2);
-long2 __ovld __cnfn convert_long2_rtz(long2);
-long2 __ovld __cnfn convert_long2_sat_rtz(long2);
-long2 __ovld __cnfn convert_long2_rtp(long2);
-long2 __ovld __cnfn convert_long2_sat_rtp(long2);
-long2 __ovld __cnfn convert_long2_rtn(long2);
-long2 __ovld __cnfn convert_long2_sat_rtn(long2);
-long2 __ovld __cnfn convert_long2(long2);
-long2 __ovld __cnfn convert_long2_sat(long2);
-long2 __ovld __cnfn convert_long2_rte(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rte(ulong2);
-long2 __ovld __cnfn convert_long2_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtz(ulong2);
-long2 __ovld __cnfn convert_long2_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtp(ulong2);
-long2 __ovld __cnfn convert_long2_rtn(ulong2);
-long2 __ovld __cnfn convert_long2_sat_rtn(ulong2);
-long2 __ovld __cnfn convert_long2(ulong2);
-long2 __ovld __cnfn convert_long2_sat(ulong2);
-long2 __ovld __cnfn convert_long2_rte(float2);
-long2 __ovld __cnfn convert_long2_sat_rte(float2);
-long2 __ovld __cnfn convert_long2_rtz(float2);
-long2 __ovld __cnfn convert_long2_sat_rtz(float2);
-long2 __ovld __cnfn convert_long2_rtp(float2);
-long2 __ovld __cnfn convert_long2_sat_rtp(float2);
-long2 __ovld __cnfn convert_long2_rtn(float2);
-long2 __ovld __cnfn convert_long2_sat_rtn(float2);
-long2 __ovld __cnfn convert_long2(float2);
-long2 __ovld __cnfn convert_long2_sat(float2);
-ulong2 __ovld __cnfn convert_ulong2_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2);
-ulong2 __ovld __cnfn convert_ulong2(char2);
-ulong2 __ovld __cnfn convert_ulong2_sat(char2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2);
-ulong2 __ovld __cnfn convert_ulong2(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uchar2);
-ulong2 __ovld __cnfn convert_ulong2_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2);
-ulong2 __ovld __cnfn convert_ulong2(short2);
-ulong2 __ovld __cnfn convert_ulong2_sat(short2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2);
-ulong2 __ovld __cnfn convert_ulong2(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ushort2);
-ulong2 __ovld __cnfn convert_ulong2_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2);
-ulong2 __ovld __cnfn convert_ulong2(int2);
-ulong2 __ovld __cnfn convert_ulong2_sat(int2);
-ulong2 __ovld __cnfn convert_ulong2_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2);
-ulong2 __ovld __cnfn convert_ulong2(uint2);
-ulong2 __ovld __cnfn convert_ulong2_sat(uint2);
-ulong2 __ovld __cnfn convert_ulong2_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2);
-ulong2 __ovld __cnfn convert_ulong2(long2);
-ulong2 __ovld __cnfn convert_ulong2_sat(long2);
-ulong2 __ovld __cnfn convert_ulong2_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2);
-ulong2 __ovld __cnfn convert_ulong2(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_sat(ulong2);
-ulong2 __ovld __cnfn convert_ulong2_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2);
-ulong2 __ovld __cnfn convert_ulong2(float2);
-ulong2 __ovld __cnfn convert_ulong2_sat(float2);
-float2 __ovld __cnfn convert_float2_rte(char2);
-float2 __ovld __cnfn convert_float2_rtz(char2);
-float2 __ovld __cnfn convert_float2_rtp(char2);
-float2 __ovld __cnfn convert_float2_rtn(char2);
-float2 __ovld __cnfn convert_float2(char2);
-float2 __ovld __cnfn convert_float2_rte(uchar2);
-float2 __ovld __cnfn convert_float2_rtz(uchar2);
-float2 __ovld __cnfn convert_float2_rtp(uchar2);
-float2 __ovld __cnfn convert_float2_rtn(uchar2);
-float2 __ovld __cnfn convert_float2(uchar2);
-float2 __ovld __cnfn convert_float2_rte(short2);
-float2 __ovld __cnfn convert_float2_rtz(short2);
-float2 __ovld __cnfn convert_float2_rtp(short2);
-float2 __ovld __cnfn convert_float2_rtn(short2);
-float2 __ovld __cnfn convert_float2(short2);
-float2 __ovld __cnfn convert_float2_rte(ushort2);
-float2 __ovld __cnfn convert_float2_rtz(ushort2);
-float2 __ovld __cnfn convert_float2_rtp(ushort2);
-float2 __ovld __cnfn convert_float2_rtn(ushort2);
-float2 __ovld __cnfn convert_float2(ushort2);
-float2 __ovld __cnfn convert_float2_rte(int2);
-float2 __ovld __cnfn convert_float2_rtz(int2);
-float2 __ovld __cnfn convert_float2_rtp(int2);
-float2 __ovld __cnfn convert_float2_rtn(int2);
-float2 __ovld __cnfn convert_float2(int2);
-float2 __ovld __cnfn convert_float2_rte(uint2);
-float2 __ovld __cnfn convert_float2_rtz(uint2);
-float2 __ovld __cnfn convert_float2_rtp(uint2);
-float2 __ovld __cnfn convert_float2_rtn(uint2);
-float2 __ovld __cnfn convert_float2(uint2);
-float2 __ovld __cnfn convert_float2_rte(long2);
-float2 __ovld __cnfn convert_float2_rtz(long2);
-float2 __ovld __cnfn convert_float2_rtp(long2);
-float2 __ovld __cnfn convert_float2_rtn(long2);
-float2 __ovld __cnfn convert_float2(long2);
-float2 __ovld __cnfn convert_float2_rte(ulong2);
-float2 __ovld __cnfn convert_float2_rtz(ulong2);
-float2 __ovld __cnfn convert_float2_rtp(ulong2);
-float2 __ovld __cnfn convert_float2_rtn(ulong2);
-float2 __ovld __cnfn convert_float2(ulong2);
-float2 __ovld __cnfn convert_float2_rte(float2);
-float2 __ovld __cnfn convert_float2_rtz(float2);
-float2 __ovld __cnfn convert_float2_rtp(float2);
-float2 __ovld __cnfn convert_float2_rtn(float2);
-float2 __ovld __cnfn convert_float2(float2);
-char3 __ovld __cnfn convert_char3_rte(char3);
-char3 __ovld __cnfn convert_char3_sat_rte(char3);
-char3 __ovld __cnfn convert_char3_rtz(char3);
-char3 __ovld __cnfn convert_char3_sat_rtz(char3);
-char3 __ovld __cnfn convert_char3_rtp(char3);
-char3 __ovld __cnfn convert_char3_sat_rtp(char3);
-char3 __ovld __cnfn convert_char3_rtn(char3);
-char3 __ovld __cnfn convert_char3_sat_rtn(char3);
-char3 __ovld __cnfn convert_char3(char3);
-char3 __ovld __cnfn convert_char3_sat(char3);
-char3 __ovld __cnfn convert_char3_rte(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rte(uchar3);
-char3 __ovld __cnfn convert_char3_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uchar3);
-char3 __ovld __cnfn convert_char3_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uchar3);
-char3 __ovld __cnfn convert_char3_rtn(uchar3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uchar3);
-char3 __ovld __cnfn convert_char3(uchar3);
-char3 __ovld __cnfn convert_char3_sat(uchar3);
-char3 __ovld __cnfn convert_char3_rte(short3);
-char3 __ovld __cnfn convert_char3_sat_rte(short3);
-char3 __ovld __cnfn convert_char3_rtz(short3);
-char3 __ovld __cnfn convert_char3_sat_rtz(short3);
-char3 __ovld __cnfn convert_char3_rtp(short3);
-char3 __ovld __cnfn convert_char3_sat_rtp(short3);
-char3 __ovld __cnfn convert_char3_rtn(short3);
-char3 __ovld __cnfn convert_char3_sat_rtn(short3);
-char3 __ovld __cnfn convert_char3(short3);
-char3 __ovld __cnfn convert_char3_sat(short3);
-char3 __ovld __cnfn convert_char3_rte(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rte(ushort3);
-char3 __ovld __cnfn convert_char3_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ushort3);
-char3 __ovld __cnfn convert_char3_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ushort3);
-char3 __ovld __cnfn convert_char3_rtn(ushort3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ushort3);
-char3 __ovld __cnfn convert_char3(ushort3);
-char3 __ovld __cnfn convert_char3_sat(ushort3);
-char3 __ovld __cnfn convert_char3_rte(int3);
-char3 __ovld __cnfn convert_char3_sat_rte(int3);
-char3 __ovld __cnfn convert_char3_rtz(int3);
-char3 __ovld __cnfn convert_char3_sat_rtz(int3);
-char3 __ovld __cnfn convert_char3_rtp(int3);
-char3 __ovld __cnfn convert_char3_sat_rtp(int3);
-char3 __ovld __cnfn convert_char3_rtn(int3);
-char3 __ovld __cnfn convert_char3_sat_rtn(int3);
-char3 __ovld __cnfn convert_char3(int3);
-char3 __ovld __cnfn convert_char3_sat(int3);
-char3 __ovld __cnfn convert_char3_rte(uint3);
-char3 __ovld __cnfn convert_char3_sat_rte(uint3);
-char3 __ovld __cnfn convert_char3_rtz(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtz(uint3);
-char3 __ovld __cnfn convert_char3_rtp(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtp(uint3);
-char3 __ovld __cnfn convert_char3_rtn(uint3);
-char3 __ovld __cnfn convert_char3_sat_rtn(uint3);
-char3 __ovld __cnfn convert_char3(uint3);
-char3 __ovld __cnfn convert_char3_sat(uint3);
-char3 __ovld __cnfn convert_char3_rte(long3);
-char3 __ovld __cnfn convert_char3_sat_rte(long3);
-char3 __ovld __cnfn convert_char3_rtz(long3);
-char3 __ovld __cnfn convert_char3_sat_rtz(long3);
-char3 __ovld __cnfn convert_char3_rtp(long3);
-char3 __ovld __cnfn convert_char3_sat_rtp(long3);
-char3 __ovld __cnfn convert_char3_rtn(long3);
-char3 __ovld __cnfn convert_char3_sat_rtn(long3);
-char3 __ovld __cnfn convert_char3(long3);
-char3 __ovld __cnfn convert_char3_sat(long3);
-char3 __ovld __cnfn convert_char3_rte(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rte(ulong3);
-char3 __ovld __cnfn convert_char3_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtz(ulong3);
-char3 __ovld __cnfn convert_char3_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtp(ulong3);
-char3 __ovld __cnfn convert_char3_rtn(ulong3);
-char3 __ovld __cnfn convert_char3_sat_rtn(ulong3);
-char3 __ovld __cnfn convert_char3(ulong3);
-char3 __ovld __cnfn convert_char3_sat(ulong3);
-char3 __ovld __cnfn convert_char3_rte(float3);
-char3 __ovld __cnfn convert_char3_sat_rte(float3);
-char3 __ovld __cnfn convert_char3_rtz(float3);
-char3 __ovld __cnfn convert_char3_sat_rtz(float3);
-char3 __ovld __cnfn convert_char3_rtp(float3);
-char3 __ovld __cnfn convert_char3_sat_rtp(float3);
-char3 __ovld __cnfn convert_char3_rtn(float3);
-char3 __ovld __cnfn convert_char3_sat_rtn(float3);
-char3 __ovld __cnfn convert_char3(float3);
-char3 __ovld __cnfn convert_char3_sat(float3);
-uchar3 __ovld __cnfn convert_uchar3_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3);
-uchar3 __ovld __cnfn convert_uchar3(char3);
-uchar3 __ovld __cnfn convert_uchar3_sat(char3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3);
-uchar3 __ovld __cnfn convert_uchar3(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uchar3);
-uchar3 __ovld __cnfn convert_uchar3_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3);
-uchar3 __ovld __cnfn convert_uchar3(short3);
-uchar3 __ovld __cnfn convert_uchar3_sat(short3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3);
-uchar3 __ovld __cnfn convert_uchar3(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ushort3);
-uchar3 __ovld __cnfn convert_uchar3_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3);
-uchar3 __ovld __cnfn convert_uchar3(int3);
-uchar3 __ovld __cnfn convert_uchar3_sat(int3);
-uchar3 __ovld __cnfn convert_uchar3_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3);
-uchar3 __ovld __cnfn convert_uchar3(uint3);
-uchar3 __ovld __cnfn convert_uchar3_sat(uint3);
-uchar3 __ovld __cnfn convert_uchar3_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3);
-uchar3 __ovld __cnfn convert_uchar3(long3);
-uchar3 __ovld __cnfn convert_uchar3_sat(long3);
-uchar3 __ovld __cnfn convert_uchar3_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3);
-uchar3 __ovld __cnfn convert_uchar3(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_sat(ulong3);
-uchar3 __ovld __cnfn convert_uchar3_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3);
-uchar3 __ovld __cnfn convert_uchar3(float3);
-uchar3 __ovld __cnfn convert_uchar3_sat(float3);
-short3 __ovld __cnfn convert_short3_rte(char3);
-short3 __ovld __cnfn convert_short3_sat_rte(char3);
-short3 __ovld __cnfn convert_short3_rtz(char3);
-short3 __ovld __cnfn convert_short3_sat_rtz(char3);
-short3 __ovld __cnfn convert_short3_rtp(char3);
-short3 __ovld __cnfn convert_short3_sat_rtp(char3);
-short3 __ovld __cnfn convert_short3_rtn(char3);
-short3 __ovld __cnfn convert_short3_sat_rtn(char3);
-short3 __ovld __cnfn convert_short3(char3);
-short3 __ovld __cnfn convert_short3_sat(char3);
-short3 __ovld __cnfn convert_short3_rte(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rte(uchar3);
-short3 __ovld __cnfn convert_short3_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uchar3);
-short3 __ovld __cnfn convert_short3_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uchar3);
-short3 __ovld __cnfn convert_short3_rtn(uchar3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uchar3);
-short3 __ovld __cnfn convert_short3(uchar3);
-short3 __ovld __cnfn convert_short3_sat(uchar3);
-short3 __ovld __cnfn convert_short3_rte(short3);
-short3 __ovld __cnfn convert_short3_sat_rte(short3);
-short3 __ovld __cnfn convert_short3_rtz(short3);
-short3 __ovld __cnfn convert_short3_sat_rtz(short3);
-short3 __ovld __cnfn convert_short3_rtp(short3);
-short3 __ovld __cnfn convert_short3_sat_rtp(short3);
-short3 __ovld __cnfn convert_short3_rtn(short3);
-short3 __ovld __cnfn convert_short3_sat_rtn(short3);
-short3 __ovld __cnfn convert_short3(short3);
-short3 __ovld __cnfn convert_short3_sat(short3);
-short3 __ovld __cnfn convert_short3_rte(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rte(ushort3);
-short3 __ovld __cnfn convert_short3_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ushort3);
-short3 __ovld __cnfn convert_short3_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ushort3);
-short3 __ovld __cnfn convert_short3_rtn(ushort3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ushort3);
-short3 __ovld __cnfn convert_short3(ushort3);
-short3 __ovld __cnfn convert_short3_sat(ushort3);
-short3 __ovld __cnfn convert_short3_rte(int3);
-short3 __ovld __cnfn convert_short3_sat_rte(int3);
-short3 __ovld __cnfn convert_short3_rtz(int3);
-short3 __ovld __cnfn convert_short3_sat_rtz(int3);
-short3 __ovld __cnfn convert_short3_rtp(int3);
-short3 __ovld __cnfn convert_short3_sat_rtp(int3);
-short3 __ovld __cnfn convert_short3_rtn(int3);
-short3 __ovld __cnfn convert_short3_sat_rtn(int3);
-short3 __ovld __cnfn convert_short3(int3);
-short3 __ovld __cnfn convert_short3_sat(int3);
-short3 __ovld __cnfn convert_short3_rte(uint3);
-short3 __ovld __cnfn convert_short3_sat_rte(uint3);
-short3 __ovld __cnfn convert_short3_rtz(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtz(uint3);
-short3 __ovld __cnfn convert_short3_rtp(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtp(uint3);
-short3 __ovld __cnfn convert_short3_rtn(uint3);
-short3 __ovld __cnfn convert_short3_sat_rtn(uint3);
-short3 __ovld __cnfn convert_short3(uint3);
-short3 __ovld __cnfn convert_short3_sat(uint3);
-short3 __ovld __cnfn convert_short3_rte(long3);
-short3 __ovld __cnfn convert_short3_sat_rte(long3);
-short3 __ovld __cnfn convert_short3_rtz(long3);
-short3 __ovld __cnfn convert_short3_sat_rtz(long3);
-short3 __ovld __cnfn convert_short3_rtp(long3);
-short3 __ovld __cnfn convert_short3_sat_rtp(long3);
-short3 __ovld __cnfn convert_short3_rtn(long3);
-short3 __ovld __cnfn convert_short3_sat_rtn(long3);
-short3 __ovld __cnfn convert_short3(long3);
-short3 __ovld __cnfn convert_short3_sat(long3);
-short3 __ovld __cnfn convert_short3_rte(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rte(ulong3);
-short3 __ovld __cnfn convert_short3_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtz(ulong3);
-short3 __ovld __cnfn convert_short3_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtp(ulong3);
-short3 __ovld __cnfn convert_short3_rtn(ulong3);
-short3 __ovld __cnfn convert_short3_sat_rtn(ulong3);
-short3 __ovld __cnfn convert_short3(ulong3);
-short3 __ovld __cnfn convert_short3_sat(ulong3);
-short3 __ovld __cnfn convert_short3_rte(float3);
-short3 __ovld __cnfn convert_short3_sat_rte(float3);
-short3 __ovld __cnfn convert_short3_rtz(float3);
-short3 __ovld __cnfn convert_short3_sat_rtz(float3);
-short3 __ovld __cnfn convert_short3_rtp(float3);
-short3 __ovld __cnfn convert_short3_sat_rtp(float3);
-short3 __ovld __cnfn convert_short3_rtn(float3);
-short3 __ovld __cnfn convert_short3_sat_rtn(float3);
-short3 __ovld __cnfn convert_short3(float3);
-short3 __ovld __cnfn convert_short3_sat(float3);
-ushort3 __ovld __cnfn convert_ushort3_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3);
-ushort3 __ovld __cnfn convert_ushort3(char3);
-ushort3 __ovld __cnfn convert_ushort3_sat(char3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3);
-ushort3 __ovld __cnfn convert_ushort3(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uchar3);
-ushort3 __ovld __cnfn convert_ushort3_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3);
-ushort3 __ovld __cnfn convert_ushort3(short3);
-ushort3 __ovld __cnfn convert_ushort3_sat(short3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3);
-ushort3 __ovld __cnfn convert_ushort3(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ushort3);
-ushort3 __ovld __cnfn convert_ushort3_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3);
-ushort3 __ovld __cnfn convert_ushort3(int3);
-ushort3 __ovld __cnfn convert_ushort3_sat(int3);
-ushort3 __ovld __cnfn convert_ushort3_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3);
-ushort3 __ovld __cnfn convert_ushort3(uint3);
-ushort3 __ovld __cnfn convert_ushort3_sat(uint3);
-ushort3 __ovld __cnfn convert_ushort3_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3);
-ushort3 __ovld __cnfn convert_ushort3(long3);
-ushort3 __ovld __cnfn convert_ushort3_sat(long3);
-ushort3 __ovld __cnfn convert_ushort3_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3);
-ushort3 __ovld __cnfn convert_ushort3(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_sat(ulong3);
-ushort3 __ovld __cnfn convert_ushort3_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3);
-ushort3 __ovld __cnfn convert_ushort3(float3);
-ushort3 __ovld __cnfn convert_ushort3_sat(float3);
-int3 __ovld __cnfn convert_int3_rte(char3);
-int3 __ovld __cnfn convert_int3_sat_rte(char3);
-int3 __ovld __cnfn convert_int3_rtz(char3);
-int3 __ovld __cnfn convert_int3_sat_rtz(char3);
-int3 __ovld __cnfn convert_int3_rtp(char3);
-int3 __ovld __cnfn convert_int3_sat_rtp(char3);
-int3 __ovld __cnfn convert_int3_rtn(char3);
-int3 __ovld __cnfn convert_int3_sat_rtn(char3);
-int3 __ovld __cnfn convert_int3(char3);
-int3 __ovld __cnfn convert_int3_sat(char3);
-int3 __ovld __cnfn convert_int3_rte(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rte(uchar3);
-int3 __ovld __cnfn convert_int3_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uchar3);
-int3 __ovld __cnfn convert_int3_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uchar3);
-int3 __ovld __cnfn convert_int3_rtn(uchar3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uchar3);
-int3 __ovld __cnfn convert_int3(uchar3);
-int3 __ovld __cnfn convert_int3_sat(uchar3);
-int3 __ovld __cnfn convert_int3_rte(short3);
-int3 __ovld __cnfn convert_int3_sat_rte(short3);
-int3 __ovld __cnfn convert_int3_rtz(short3);
-int3 __ovld __cnfn convert_int3_sat_rtz(short3);
-int3 __ovld __cnfn convert_int3_rtp(short3);
-int3 __ovld __cnfn convert_int3_sat_rtp(short3);
-int3 __ovld __cnfn convert_int3_rtn(short3);
-int3 __ovld __cnfn convert_int3_sat_rtn(short3);
-int3 __ovld __cnfn convert_int3(short3);
-int3 __ovld __cnfn convert_int3_sat(short3);
-int3 __ovld __cnfn convert_int3_rte(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rte(ushort3);
-int3 __ovld __cnfn convert_int3_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ushort3);
-int3 __ovld __cnfn convert_int3_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ushort3);
-int3 __ovld __cnfn convert_int3_rtn(ushort3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ushort3);
-int3 __ovld __cnfn convert_int3(ushort3);
-int3 __ovld __cnfn convert_int3_sat(ushort3);
-int3 __ovld __cnfn convert_int3_rte(int3);
-int3 __ovld __cnfn convert_int3_sat_rte(int3);
-int3 __ovld __cnfn convert_int3_rtz(int3);
-int3 __ovld __cnfn convert_int3_sat_rtz(int3);
-int3 __ovld __cnfn convert_int3_rtp(int3);
-int3 __ovld __cnfn convert_int3_sat_rtp(int3);
-int3 __ovld __cnfn convert_int3_rtn(int3);
-int3 __ovld __cnfn convert_int3_sat_rtn(int3);
-int3 __ovld __cnfn convert_int3(int3);
-int3 __ovld __cnfn convert_int3_sat(int3);
-int3 __ovld __cnfn convert_int3_rte(uint3);
-int3 __ovld __cnfn convert_int3_sat_rte(uint3);
-int3 __ovld __cnfn convert_int3_rtz(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtz(uint3);
-int3 __ovld __cnfn convert_int3_rtp(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtp(uint3);
-int3 __ovld __cnfn convert_int3_rtn(uint3);
-int3 __ovld __cnfn convert_int3_sat_rtn(uint3);
-int3 __ovld __cnfn convert_int3(uint3);
-int3 __ovld __cnfn convert_int3_sat(uint3);
-int3 __ovld __cnfn convert_int3_rte(long3);
-int3 __ovld __cnfn convert_int3_sat_rte(long3);
-int3 __ovld __cnfn convert_int3_rtz(long3);
-int3 __ovld __cnfn convert_int3_sat_rtz(long3);
-int3 __ovld __cnfn convert_int3_rtp(long3);
-int3 __ovld __cnfn convert_int3_sat_rtp(long3);
-int3 __ovld __cnfn convert_int3_rtn(long3);
-int3 __ovld __cnfn convert_int3_sat_rtn(long3);
-int3 __ovld __cnfn convert_int3(long3);
-int3 __ovld __cnfn convert_int3_sat(long3);
-int3 __ovld __cnfn convert_int3_rte(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rte(ulong3);
-int3 __ovld __cnfn convert_int3_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtz(ulong3);
-int3 __ovld __cnfn convert_int3_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtp(ulong3);
-int3 __ovld __cnfn convert_int3_rtn(ulong3);
-int3 __ovld __cnfn convert_int3_sat_rtn(ulong3);
-int3 __ovld __cnfn convert_int3(ulong3);
-int3 __ovld __cnfn convert_int3_sat(ulong3);
-int3 __ovld __cnfn convert_int3_rte(float3);
-int3 __ovld __cnfn convert_int3_sat_rte(float3);
-int3 __ovld __cnfn convert_int3_rtz(float3);
-int3 __ovld __cnfn convert_int3_sat_rtz(float3);
-int3 __ovld __cnfn convert_int3_rtp(float3);
-int3 __ovld __cnfn convert_int3_sat_rtp(float3);
-int3 __ovld __cnfn convert_int3_rtn(float3);
-int3 __ovld __cnfn convert_int3_sat_rtn(float3);
-int3 __ovld __cnfn convert_int3(float3);
-int3 __ovld __cnfn convert_int3_sat(float3);
-uint3 __ovld __cnfn convert_uint3_rte(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(char3);
-uint3 __ovld __cnfn convert_uint3_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(char3);
-uint3 __ovld __cnfn convert_uint3_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(char3);
-uint3 __ovld __cnfn convert_uint3_rtn(char3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(char3);
-uint3 __ovld __cnfn convert_uint3(char3);
-uint3 __ovld __cnfn convert_uint3_sat(char3);
-uint3 __ovld __cnfn convert_uint3_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3);
-uint3 __ovld __cnfn convert_uint3_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3);
-uint3 __ovld __cnfn convert_uint3(uchar3);
-uint3 __ovld __cnfn convert_uint3_sat(uchar3);
-uint3 __ovld __cnfn convert_uint3_rte(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(short3);
-uint3 __ovld __cnfn convert_uint3_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(short3);
-uint3 __ovld __cnfn convert_uint3_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(short3);
-uint3 __ovld __cnfn convert_uint3_rtn(short3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(short3);
-uint3 __ovld __cnfn convert_uint3(short3);
-uint3 __ovld __cnfn convert_uint3_sat(short3);
-uint3 __ovld __cnfn convert_uint3_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3);
-uint3 __ovld __cnfn convert_uint3_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3);
-uint3 __ovld __cnfn convert_uint3(ushort3);
-uint3 __ovld __cnfn convert_uint3_sat(ushort3);
-uint3 __ovld __cnfn convert_uint3_rte(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(int3);
-uint3 __ovld __cnfn convert_uint3_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(int3);
-uint3 __ovld __cnfn convert_uint3_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(int3);
-uint3 __ovld __cnfn convert_uint3_rtn(int3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(int3);
-uint3 __ovld __cnfn convert_uint3(int3);
-uint3 __ovld __cnfn convert_uint3_sat(int3);
-uint3 __ovld __cnfn convert_uint3_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(uint3);
-uint3 __ovld __cnfn convert_uint3_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3);
-uint3 __ovld __cnfn convert_uint3_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3);
-uint3 __ovld __cnfn convert_uint3_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3);
-uint3 __ovld __cnfn convert_uint3(uint3);
-uint3 __ovld __cnfn convert_uint3_sat(uint3);
-uint3 __ovld __cnfn convert_uint3_rte(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(long3);
-uint3 __ovld __cnfn convert_uint3_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(long3);
-uint3 __ovld __cnfn convert_uint3_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(long3);
-uint3 __ovld __cnfn convert_uint3_rtn(long3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(long3);
-uint3 __ovld __cnfn convert_uint3(long3);
-uint3 __ovld __cnfn convert_uint3_sat(long3);
-uint3 __ovld __cnfn convert_uint3_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3);
-uint3 __ovld __cnfn convert_uint3_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3);
-uint3 __ovld __cnfn convert_uint3(ulong3);
-uint3 __ovld __cnfn convert_uint3_sat(ulong3);
-uint3 __ovld __cnfn convert_uint3_rte(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(float3);
-uint3 __ovld __cnfn convert_uint3_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(float3);
-uint3 __ovld __cnfn convert_uint3_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(float3);
-uint3 __ovld __cnfn convert_uint3_rtn(float3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(float3);
-uint3 __ovld __cnfn convert_uint3(float3);
-uint3 __ovld __cnfn convert_uint3_sat(float3);
-long3 __ovld __cnfn convert_long3_rte(char3);
-long3 __ovld __cnfn convert_long3_sat_rte(char3);
-long3 __ovld __cnfn convert_long3_rtz(char3);
-long3 __ovld __cnfn convert_long3_sat_rtz(char3);
-long3 __ovld __cnfn convert_long3_rtp(char3);
-long3 __ovld __cnfn convert_long3_sat_rtp(char3);
-long3 __ovld __cnfn convert_long3_rtn(char3);
-long3 __ovld __cnfn convert_long3_sat_rtn(char3);
-long3 __ovld __cnfn convert_long3(char3);
-long3 __ovld __cnfn convert_long3_sat(char3);
-long3 __ovld __cnfn convert_long3_rte(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rte(uchar3);
-long3 __ovld __cnfn convert_long3_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uchar3);
-long3 __ovld __cnfn convert_long3_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uchar3);
-long3 __ovld __cnfn convert_long3_rtn(uchar3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uchar3);
-long3 __ovld __cnfn convert_long3(uchar3);
-long3 __ovld __cnfn convert_long3_sat(uchar3);
-long3 __ovld __cnfn convert_long3_rte(short3);
-long3 __ovld __cnfn convert_long3_sat_rte(short3);
-long3 __ovld __cnfn convert_long3_rtz(short3);
-long3 __ovld __cnfn convert_long3_sat_rtz(short3);
-long3 __ovld __cnfn convert_long3_rtp(short3);
-long3 __ovld __cnfn convert_long3_sat_rtp(short3);
-long3 __ovld __cnfn convert_long3_rtn(short3);
-long3 __ovld __cnfn convert_long3_sat_rtn(short3);
-long3 __ovld __cnfn convert_long3(short3);
-long3 __ovld __cnfn convert_long3_sat(short3);
-long3 __ovld __cnfn convert_long3_rte(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rte(ushort3);
-long3 __ovld __cnfn convert_long3_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ushort3);
-long3 __ovld __cnfn convert_long3_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ushort3);
-long3 __ovld __cnfn convert_long3_rtn(ushort3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ushort3);
-long3 __ovld __cnfn convert_long3(ushort3);
-long3 __ovld __cnfn convert_long3_sat(ushort3);
-long3 __ovld __cnfn convert_long3_rte(int3);
-long3 __ovld __cnfn convert_long3_sat_rte(int3);
-long3 __ovld __cnfn convert_long3_rtz(int3);
-long3 __ovld __cnfn convert_long3_sat_rtz(int3);
-long3 __ovld __cnfn convert_long3_rtp(int3);
-long3 __ovld __cnfn convert_long3_sat_rtp(int3);
-long3 __ovld __cnfn convert_long3_rtn(int3);
-long3 __ovld __cnfn convert_long3_sat_rtn(int3);
-long3 __ovld __cnfn convert_long3(int3);
-long3 __ovld __cnfn convert_long3_sat(int3);
-long3 __ovld __cnfn convert_long3_rte(uint3);
-long3 __ovld __cnfn convert_long3_sat_rte(uint3);
-long3 __ovld __cnfn convert_long3_rtz(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtz(uint3);
-long3 __ovld __cnfn convert_long3_rtp(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtp(uint3);
-long3 __ovld __cnfn convert_long3_rtn(uint3);
-long3 __ovld __cnfn convert_long3_sat_rtn(uint3);
-long3 __ovld __cnfn convert_long3(uint3);
-long3 __ovld __cnfn convert_long3_sat(uint3);
-long3 __ovld __cnfn convert_long3_rte(long3);
-long3 __ovld __cnfn convert_long3_sat_rte(long3);
-long3 __ovld __cnfn convert_long3_rtz(long3);
-long3 __ovld __cnfn convert_long3_sat_rtz(long3);
-long3 __ovld __cnfn convert_long3_rtp(long3);
-long3 __ovld __cnfn convert_long3_sat_rtp(long3);
-long3 __ovld __cnfn convert_long3_rtn(long3);
-long3 __ovld __cnfn convert_long3_sat_rtn(long3);
-long3 __ovld __cnfn convert_long3(long3);
-long3 __ovld __cnfn convert_long3_sat(long3);
-long3 __ovld __cnfn convert_long3_rte(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rte(ulong3);
-long3 __ovld __cnfn convert_long3_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtz(ulong3);
-long3 __ovld __cnfn convert_long3_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtp(ulong3);
-long3 __ovld __cnfn convert_long3_rtn(ulong3);
-long3 __ovld __cnfn convert_long3_sat_rtn(ulong3);
-long3 __ovld __cnfn convert_long3(ulong3);
-long3 __ovld __cnfn convert_long3_sat(ulong3);
-long3 __ovld __cnfn convert_long3_rte(float3);
-long3 __ovld __cnfn convert_long3_sat_rte(float3);
-long3 __ovld __cnfn convert_long3_rtz(float3);
-long3 __ovld __cnfn convert_long3_sat_rtz(float3);
-long3 __ovld __cnfn convert_long3_rtp(float3);
-long3 __ovld __cnfn convert_long3_sat_rtp(float3);
-long3 __ovld __cnfn convert_long3_rtn(float3);
-long3 __ovld __cnfn convert_long3_sat_rtn(float3);
-long3 __ovld __cnfn convert_long3(float3);
-long3 __ovld __cnfn convert_long3_sat(float3);
-ulong3 __ovld __cnfn convert_ulong3_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3);
-ulong3 __ovld __cnfn convert_ulong3(char3);
-ulong3 __ovld __cnfn convert_ulong3_sat(char3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3);
-ulong3 __ovld __cnfn convert_ulong3(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uchar3);
-ulong3 __ovld __cnfn convert_ulong3_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3);
-ulong3 __ovld __cnfn convert_ulong3(short3);
-ulong3 __ovld __cnfn convert_ulong3_sat(short3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3);
-ulong3 __ovld __cnfn convert_ulong3(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ushort3);
-ulong3 __ovld __cnfn convert_ulong3_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3);
-ulong3 __ovld __cnfn convert_ulong3(int3);
-ulong3 __ovld __cnfn convert_ulong3_sat(int3);
-ulong3 __ovld __cnfn convert_ulong3_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3);
-ulong3 __ovld __cnfn convert_ulong3(uint3);
-ulong3 __ovld __cnfn convert_ulong3_sat(uint3);
-ulong3 __ovld __cnfn convert_ulong3_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3);
-ulong3 __ovld __cnfn convert_ulong3(long3);
-ulong3 __ovld __cnfn convert_ulong3_sat(long3);
-ulong3 __ovld __cnfn convert_ulong3_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3);
-ulong3 __ovld __cnfn convert_ulong3(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_sat(ulong3);
-ulong3 __ovld __cnfn convert_ulong3_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3);
-ulong3 __ovld __cnfn convert_ulong3(float3);
-ulong3 __ovld __cnfn convert_ulong3_sat(float3);
-float3 __ovld __cnfn convert_float3_rte(char3);
-float3 __ovld __cnfn convert_float3_rtz(char3);
-float3 __ovld __cnfn convert_float3_rtp(char3);
-float3 __ovld __cnfn convert_float3_rtn(char3);
-float3 __ovld __cnfn convert_float3(char3);
-float3 __ovld __cnfn convert_float3_rte(uchar3);
-float3 __ovld __cnfn convert_float3_rtz(uchar3);
-float3 __ovld __cnfn convert_float3_rtp(uchar3);
-float3 __ovld __cnfn convert_float3_rtn(uchar3);
-float3 __ovld __cnfn convert_float3(uchar3);
-float3 __ovld __cnfn convert_float3_rte(short3);
-float3 __ovld __cnfn convert_float3_rtz(short3);
-float3 __ovld __cnfn convert_float3_rtp(short3);
-float3 __ovld __cnfn convert_float3_rtn(short3);
-float3 __ovld __cnfn convert_float3(short3);
-float3 __ovld __cnfn convert_float3_rte(ushort3);
-float3 __ovld __cnfn convert_float3_rtz(ushort3);
-float3 __ovld __cnfn convert_float3_rtp(ushort3);
-float3 __ovld __cnfn convert_float3_rtn(ushort3);
-float3 __ovld __cnfn convert_float3(ushort3);
-float3 __ovld __cnfn convert_float3_rte(int3);
-float3 __ovld __cnfn convert_float3_rtz(int3);
-float3 __ovld __cnfn convert_float3_rtp(int3);
-float3 __ovld __cnfn convert_float3_rtn(int3);
-float3 __ovld __cnfn convert_float3(int3);
-float3 __ovld __cnfn convert_float3_rte(uint3);
-float3 __ovld __cnfn convert_float3_rtz(uint3);
-float3 __ovld __cnfn convert_float3_rtp(uint3);
-float3 __ovld __cnfn convert_float3_rtn(uint3);
-float3 __ovld __cnfn convert_float3(uint3);
-float3 __ovld __cnfn convert_float3_rte(long3);
-float3 __ovld __cnfn convert_float3_rtz(long3);
-float3 __ovld __cnfn convert_float3_rtp(long3);
-float3 __ovld __cnfn convert_float3_rtn(long3);
-float3 __ovld __cnfn convert_float3(long3);
-float3 __ovld __cnfn convert_float3_rte(ulong3);
-float3 __ovld __cnfn convert_float3_rtz(ulong3);
-float3 __ovld __cnfn convert_float3_rtp(ulong3);
-float3 __ovld __cnfn convert_float3_rtn(ulong3);
-float3 __ovld __cnfn convert_float3(ulong3);
-float3 __ovld __cnfn convert_float3_rte(float3);
-float3 __ovld __cnfn convert_float3_rtz(float3);
-float3 __ovld __cnfn convert_float3_rtp(float3);
-float3 __ovld __cnfn convert_float3_rtn(float3);
-float3 __ovld __cnfn convert_float3(float3);
-char4 __ovld __cnfn convert_char4_rte(char4);
-char4 __ovld __cnfn convert_char4_sat_rte(char4);
-char4 __ovld __cnfn convert_char4_rtz(char4);
-char4 __ovld __cnfn convert_char4_sat_rtz(char4);
-char4 __ovld __cnfn convert_char4_rtp(char4);
-char4 __ovld __cnfn convert_char4_sat_rtp(char4);
-char4 __ovld __cnfn convert_char4_rtn(char4);
-char4 __ovld __cnfn convert_char4_sat_rtn(char4);
-char4 __ovld __cnfn convert_char4(char4);
-char4 __ovld __cnfn convert_char4_sat(char4);
-char4 __ovld __cnfn convert_char4_rte(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rte(uchar4);
-char4 __ovld __cnfn convert_char4_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uchar4);
-char4 __ovld __cnfn convert_char4_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uchar4);
-char4 __ovld __cnfn convert_char4_rtn(uchar4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uchar4);
-char4 __ovld __cnfn convert_char4(uchar4);
-char4 __ovld __cnfn convert_char4_sat(uchar4);
-char4 __ovld __cnfn convert_char4_rte(short4);
-char4 __ovld __cnfn convert_char4_sat_rte(short4);
-char4 __ovld __cnfn convert_char4_rtz(short4);
-char4 __ovld __cnfn convert_char4_sat_rtz(short4);
-char4 __ovld __cnfn convert_char4_rtp(short4);
-char4 __ovld __cnfn convert_char4_sat_rtp(short4);
-char4 __ovld __cnfn convert_char4_rtn(short4);
-char4 __ovld __cnfn convert_char4_sat_rtn(short4);
-char4 __ovld __cnfn convert_char4(short4);
-char4 __ovld __cnfn convert_char4_sat(short4);
-char4 __ovld __cnfn convert_char4_rte(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rte(ushort4);
-char4 __ovld __cnfn convert_char4_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ushort4);
-char4 __ovld __cnfn convert_char4_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ushort4);
-char4 __ovld __cnfn convert_char4_rtn(ushort4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ushort4);
-char4 __ovld __cnfn convert_char4(ushort4);
-char4 __ovld __cnfn convert_char4_sat(ushort4);
-char4 __ovld __cnfn convert_char4_rte(int4);
-char4 __ovld __cnfn convert_char4_sat_rte(int4);
-char4 __ovld __cnfn convert_char4_rtz(int4);
-char4 __ovld __cnfn convert_char4_sat_rtz(int4);
-char4 __ovld __cnfn convert_char4_rtp(int4);
-char4 __ovld __cnfn convert_char4_sat_rtp(int4);
-char4 __ovld __cnfn convert_char4_rtn(int4);
-char4 __ovld __cnfn convert_char4_sat_rtn(int4);
-char4 __ovld __cnfn convert_char4(int4);
-char4 __ovld __cnfn convert_char4_sat(int4);
-char4 __ovld __cnfn convert_char4_rte(uint4);
-char4 __ovld __cnfn convert_char4_sat_rte(uint4);
-char4 __ovld __cnfn convert_char4_rtz(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtz(uint4);
-char4 __ovld __cnfn convert_char4_rtp(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtp(uint4);
-char4 __ovld __cnfn convert_char4_rtn(uint4);
-char4 __ovld __cnfn convert_char4_sat_rtn(uint4);
-char4 __ovld __cnfn convert_char4(uint4);
-char4 __ovld __cnfn convert_char4_sat(uint4);
-char4 __ovld __cnfn convert_char4_rte(long4);
-char4 __ovld __cnfn convert_char4_sat_rte(long4);
-char4 __ovld __cnfn convert_char4_rtz(long4);
-char4 __ovld __cnfn convert_char4_sat_rtz(long4);
-char4 __ovld __cnfn convert_char4_rtp(long4);
-char4 __ovld __cnfn convert_char4_sat_rtp(long4);
-char4 __ovld __cnfn convert_char4_rtn(long4);
-char4 __ovld __cnfn convert_char4_sat_rtn(long4);
-char4 __ovld __cnfn convert_char4(long4);
-char4 __ovld __cnfn convert_char4_sat(long4);
-char4 __ovld __cnfn convert_char4_rte(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rte(ulong4);
-char4 __ovld __cnfn convert_char4_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtz(ulong4);
-char4 __ovld __cnfn convert_char4_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtp(ulong4);
-char4 __ovld __cnfn convert_char4_rtn(ulong4);
-char4 __ovld __cnfn convert_char4_sat_rtn(ulong4);
-char4 __ovld __cnfn convert_char4(ulong4);
-char4 __ovld __cnfn convert_char4_sat(ulong4);
-char4 __ovld __cnfn convert_char4_rte(float4);
-char4 __ovld __cnfn convert_char4_sat_rte(float4);
-char4 __ovld __cnfn convert_char4_rtz(float4);
-char4 __ovld __cnfn convert_char4_sat_rtz(float4);
-char4 __ovld __cnfn convert_char4_rtp(float4);
-char4 __ovld __cnfn convert_char4_sat_rtp(float4);
-char4 __ovld __cnfn convert_char4_rtn(float4);
-char4 __ovld __cnfn convert_char4_sat_rtn(float4);
-char4 __ovld __cnfn convert_char4(float4);
-char4 __ovld __cnfn convert_char4_sat(float4);
-uchar4 __ovld __cnfn convert_uchar4_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4);
-uchar4 __ovld __cnfn convert_uchar4(char4);
-uchar4 __ovld __cnfn convert_uchar4_sat(char4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4);
-uchar4 __ovld __cnfn convert_uchar4(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uchar4);
-uchar4 __ovld __cnfn convert_uchar4_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4);
-uchar4 __ovld __cnfn convert_uchar4(short4);
-uchar4 __ovld __cnfn convert_uchar4_sat(short4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4);
-uchar4 __ovld __cnfn convert_uchar4(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ushort4);
-uchar4 __ovld __cnfn convert_uchar4_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4);
-uchar4 __ovld __cnfn convert_uchar4(int4);
-uchar4 __ovld __cnfn convert_uchar4_sat(int4);
-uchar4 __ovld __cnfn convert_uchar4_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4);
-uchar4 __ovld __cnfn convert_uchar4(uint4);
-uchar4 __ovld __cnfn convert_uchar4_sat(uint4);
-uchar4 __ovld __cnfn convert_uchar4_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4);
-uchar4 __ovld __cnfn convert_uchar4(long4);
-uchar4 __ovld __cnfn convert_uchar4_sat(long4);
-uchar4 __ovld __cnfn convert_uchar4_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4);
-uchar4 __ovld __cnfn convert_uchar4(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_sat(ulong4);
-uchar4 __ovld __cnfn convert_uchar4_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4);
-uchar4 __ovld __cnfn convert_uchar4(float4);
-uchar4 __ovld __cnfn convert_uchar4_sat(float4);
-short4 __ovld __cnfn convert_short4_rte(char4);
-short4 __ovld __cnfn convert_short4_sat_rte(char4);
-short4 __ovld __cnfn convert_short4_rtz(char4);
-short4 __ovld __cnfn convert_short4_sat_rtz(char4);
-short4 __ovld __cnfn convert_short4_rtp(char4);
-short4 __ovld __cnfn convert_short4_sat_rtp(char4);
-short4 __ovld __cnfn convert_short4_rtn(char4);
-short4 __ovld __cnfn convert_short4_sat_rtn(char4);
-short4 __ovld __cnfn convert_short4(char4);
-short4 __ovld __cnfn convert_short4_sat(char4);
-short4 __ovld __cnfn convert_short4_rte(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rte(uchar4);
-short4 __ovld __cnfn convert_short4_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uchar4);
-short4 __ovld __cnfn convert_short4_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uchar4);
-short4 __ovld __cnfn convert_short4_rtn(uchar4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uchar4);
-short4 __ovld __cnfn convert_short4(uchar4);
-short4 __ovld __cnfn convert_short4_sat(uchar4);
-short4 __ovld __cnfn convert_short4_rte(short4);
-short4 __ovld __cnfn convert_short4_sat_rte(short4);
-short4 __ovld __cnfn convert_short4_rtz(short4);
-short4 __ovld __cnfn convert_short4_sat_rtz(short4);
-short4 __ovld __cnfn convert_short4_rtp(short4);
-short4 __ovld __cnfn convert_short4_sat_rtp(short4);
-short4 __ovld __cnfn convert_short4_rtn(short4);
-short4 __ovld __cnfn convert_short4_sat_rtn(short4);
-short4 __ovld __cnfn convert_short4(short4);
-short4 __ovld __cnfn convert_short4_sat(short4);
-short4 __ovld __cnfn convert_short4_rte(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rte(ushort4);
-short4 __ovld __cnfn convert_short4_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ushort4);
-short4 __ovld __cnfn convert_short4_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ushort4);
-short4 __ovld __cnfn convert_short4_rtn(ushort4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ushort4);
-short4 __ovld __cnfn convert_short4(ushort4);
-short4 __ovld __cnfn convert_short4_sat(ushort4);
-short4 __ovld __cnfn convert_short4_rte(int4);
-short4 __ovld __cnfn convert_short4_sat_rte(int4);
-short4 __ovld __cnfn convert_short4_rtz(int4);
-short4 __ovld __cnfn convert_short4_sat_rtz(int4);
-short4 __ovld __cnfn convert_short4_rtp(int4);
-short4 __ovld __cnfn convert_short4_sat_rtp(int4);
-short4 __ovld __cnfn convert_short4_rtn(int4);
-short4 __ovld __cnfn convert_short4_sat_rtn(int4);
-short4 __ovld __cnfn convert_short4(int4);
-short4 __ovld __cnfn convert_short4_sat(int4);
-short4 __ovld __cnfn convert_short4_rte(uint4);
-short4 __ovld __cnfn convert_short4_sat_rte(uint4);
-short4 __ovld __cnfn convert_short4_rtz(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtz(uint4);
-short4 __ovld __cnfn convert_short4_rtp(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtp(uint4);
-short4 __ovld __cnfn convert_short4_rtn(uint4);
-short4 __ovld __cnfn convert_short4_sat_rtn(uint4);
-short4 __ovld __cnfn convert_short4(uint4);
-short4 __ovld __cnfn convert_short4_sat(uint4);
-short4 __ovld __cnfn convert_short4_rte(long4);
-short4 __ovld __cnfn convert_short4_sat_rte(long4);
-short4 __ovld __cnfn convert_short4_rtz(long4);
-short4 __ovld __cnfn convert_short4_sat_rtz(long4);
-short4 __ovld __cnfn convert_short4_rtp(long4);
-short4 __ovld __cnfn convert_short4_sat_rtp(long4);
-short4 __ovld __cnfn convert_short4_rtn(long4);
-short4 __ovld __cnfn convert_short4_sat_rtn(long4);
-short4 __ovld __cnfn convert_short4(long4);
-short4 __ovld __cnfn convert_short4_sat(long4);
-short4 __ovld __cnfn convert_short4_rte(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rte(ulong4);
-short4 __ovld __cnfn convert_short4_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtz(ulong4);
-short4 __ovld __cnfn convert_short4_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtp(ulong4);
-short4 __ovld __cnfn convert_short4_rtn(ulong4);
-short4 __ovld __cnfn convert_short4_sat_rtn(ulong4);
-short4 __ovld __cnfn convert_short4(ulong4);
-short4 __ovld __cnfn convert_short4_sat(ulong4);
-short4 __ovld __cnfn convert_short4_rte(float4);
-short4 __ovld __cnfn convert_short4_sat_rte(float4);
-short4 __ovld __cnfn convert_short4_rtz(float4);
-short4 __ovld __cnfn convert_short4_sat_rtz(float4);
-short4 __ovld __cnfn convert_short4_rtp(float4);
-short4 __ovld __cnfn convert_short4_sat_rtp(float4);
-short4 __ovld __cnfn convert_short4_rtn(float4);
-short4 __ovld __cnfn convert_short4_sat_rtn(float4);
-short4 __ovld __cnfn convert_short4(float4);
-short4 __ovld __cnfn convert_short4_sat(float4);
-ushort4 __ovld __cnfn convert_ushort4_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4);
-ushort4 __ovld __cnfn convert_ushort4(char4);
-ushort4 __ovld __cnfn convert_ushort4_sat(char4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4);
-ushort4 __ovld __cnfn convert_ushort4(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uchar4);
-ushort4 __ovld __cnfn convert_ushort4_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4);
-ushort4 __ovld __cnfn convert_ushort4(short4);
-ushort4 __ovld __cnfn convert_ushort4_sat(short4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4);
-ushort4 __ovld __cnfn convert_ushort4(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ushort4);
-ushort4 __ovld __cnfn convert_ushort4_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4);
-ushort4 __ovld __cnfn convert_ushort4(int4);
-ushort4 __ovld __cnfn convert_ushort4_sat(int4);
-ushort4 __ovld __cnfn convert_ushort4_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4);
-ushort4 __ovld __cnfn convert_ushort4(uint4);
-ushort4 __ovld __cnfn convert_ushort4_sat(uint4);
-ushort4 __ovld __cnfn convert_ushort4_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4);
-ushort4 __ovld __cnfn convert_ushort4(long4);
-ushort4 __ovld __cnfn convert_ushort4_sat(long4);
-ushort4 __ovld __cnfn convert_ushort4_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4);
-ushort4 __ovld __cnfn convert_ushort4(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_sat(ulong4);
-ushort4 __ovld __cnfn convert_ushort4_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4);
-ushort4 __ovld __cnfn convert_ushort4(float4);
-ushort4 __ovld __cnfn convert_ushort4_sat(float4);
-int4 __ovld __cnfn convert_int4_rte(char4);
-int4 __ovld __cnfn convert_int4_sat_rte(char4);
-int4 __ovld __cnfn convert_int4_rtz(char4);
-int4 __ovld __cnfn convert_int4_sat_rtz(char4);
-int4 __ovld __cnfn convert_int4_rtp(char4);
-int4 __ovld __cnfn convert_int4_sat_rtp(char4);
-int4 __ovld __cnfn convert_int4_rtn(char4);
-int4 __ovld __cnfn convert_int4_sat_rtn(char4);
-int4 __ovld __cnfn convert_int4(char4);
-int4 __ovld __cnfn convert_int4_sat(char4);
-int4 __ovld __cnfn convert_int4_rte(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rte(uchar4);
-int4 __ovld __cnfn convert_int4_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uchar4);
-int4 __ovld __cnfn convert_int4_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uchar4);
-int4 __ovld __cnfn convert_int4_rtn(uchar4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uchar4);
-int4 __ovld __cnfn convert_int4(uchar4);
-int4 __ovld __cnfn convert_int4_sat(uchar4);
-int4 __ovld __cnfn convert_int4_rte(short4);
-int4 __ovld __cnfn convert_int4_sat_rte(short4);
-int4 __ovld __cnfn convert_int4_rtz(short4);
-int4 __ovld __cnfn convert_int4_sat_rtz(short4);
-int4 __ovld __cnfn convert_int4_rtp(short4);
-int4 __ovld __cnfn convert_int4_sat_rtp(short4);
-int4 __ovld __cnfn convert_int4_rtn(short4);
-int4 __ovld __cnfn convert_int4_sat_rtn(short4);
-int4 __ovld __cnfn convert_int4(short4);
-int4 __ovld __cnfn convert_int4_sat(short4);
-int4 __ovld __cnfn convert_int4_rte(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rte(ushort4);
-int4 __ovld __cnfn convert_int4_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ushort4);
-int4 __ovld __cnfn convert_int4_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ushort4);
-int4 __ovld __cnfn convert_int4_rtn(ushort4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ushort4);
-int4 __ovld __cnfn convert_int4(ushort4);
-int4 __ovld __cnfn convert_int4_sat(ushort4);
-int4 __ovld __cnfn convert_int4_rte(int4);
-int4 __ovld __cnfn convert_int4_sat_rte(int4);
-int4 __ovld __cnfn convert_int4_rtz(int4);
-int4 __ovld __cnfn convert_int4_sat_rtz(int4);
-int4 __ovld __cnfn convert_int4_rtp(int4);
-int4 __ovld __cnfn convert_int4_sat_rtp(int4);
-int4 __ovld __cnfn convert_int4_rtn(int4);
-int4 __ovld __cnfn convert_int4_sat_rtn(int4);
-int4 __ovld __cnfn convert_int4(int4);
-int4 __ovld __cnfn convert_int4_sat(int4);
-int4 __ovld __cnfn convert_int4_rte(uint4);
-int4 __ovld __cnfn convert_int4_sat_rte(uint4);
-int4 __ovld __cnfn convert_int4_rtz(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtz(uint4);
-int4 __ovld __cnfn convert_int4_rtp(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtp(uint4);
-int4 __ovld __cnfn convert_int4_rtn(uint4);
-int4 __ovld __cnfn convert_int4_sat_rtn(uint4);
-int4 __ovld __cnfn convert_int4(uint4);
-int4 __ovld __cnfn convert_int4_sat(uint4);
-int4 __ovld __cnfn convert_int4_rte(long4);
-int4 __ovld __cnfn convert_int4_sat_rte(long4);
-int4 __ovld __cnfn convert_int4_rtz(long4);
-int4 __ovld __cnfn convert_int4_sat_rtz(long4);
-int4 __ovld __cnfn convert_int4_rtp(long4);
-int4 __ovld __cnfn convert_int4_sat_rtp(long4);
-int4 __ovld __cnfn convert_int4_rtn(long4);
-int4 __ovld __cnfn convert_int4_sat_rtn(long4);
-int4 __ovld __cnfn convert_int4(long4);
-int4 __ovld __cnfn convert_int4_sat(long4);
-int4 __ovld __cnfn convert_int4_rte(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rte(ulong4);
-int4 __ovld __cnfn convert_int4_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtz(ulong4);
-int4 __ovld __cnfn convert_int4_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtp(ulong4);
-int4 __ovld __cnfn convert_int4_rtn(ulong4);
-int4 __ovld __cnfn convert_int4_sat_rtn(ulong4);
-int4 __ovld __cnfn convert_int4(ulong4);
-int4 __ovld __cnfn convert_int4_sat(ulong4);
-int4 __ovld __cnfn convert_int4_rte(float4);
-int4 __ovld __cnfn convert_int4_sat_rte(float4);
-int4 __ovld __cnfn convert_int4_rtz(float4);
-int4 __ovld __cnfn convert_int4_sat_rtz(float4);
-int4 __ovld __cnfn convert_int4_rtp(float4);
-int4 __ovld __cnfn convert_int4_sat_rtp(float4);
-int4 __ovld __cnfn convert_int4_rtn(float4);
-int4 __ovld __cnfn convert_int4_sat_rtn(float4);
-int4 __ovld __cnfn convert_int4(float4);
-int4 __ovld __cnfn convert_int4_sat(float4);
-uint4 __ovld __cnfn convert_uint4_rte(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(char4);
-uint4 __ovld __cnfn convert_uint4_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(char4);
-uint4 __ovld __cnfn convert_uint4_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(char4);
-uint4 __ovld __cnfn convert_uint4_rtn(char4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(char4);
-uint4 __ovld __cnfn convert_uint4(char4);
-uint4 __ovld __cnfn convert_uint4_sat(char4);
-uint4 __ovld __cnfn convert_uint4_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4);
-uint4 __ovld __cnfn convert_uint4_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4);
-uint4 __ovld __cnfn convert_uint4(uchar4);
-uint4 __ovld __cnfn convert_uint4_sat(uchar4);
-uint4 __ovld __cnfn convert_uint4_rte(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(short4);
-uint4 __ovld __cnfn convert_uint4_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(short4);
-uint4 __ovld __cnfn convert_uint4_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(short4);
-uint4 __ovld __cnfn convert_uint4_rtn(short4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(short4);
-uint4 __ovld __cnfn convert_uint4(short4);
-uint4 __ovld __cnfn convert_uint4_sat(short4);
-uint4 __ovld __cnfn convert_uint4_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4);
-uint4 __ovld __cnfn convert_uint4_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4);
-uint4 __ovld __cnfn convert_uint4(ushort4);
-uint4 __ovld __cnfn convert_uint4_sat(ushort4);
-uint4 __ovld __cnfn convert_uint4_rte(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(int4);
-uint4 __ovld __cnfn convert_uint4_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(int4);
-uint4 __ovld __cnfn convert_uint4_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(int4);
-uint4 __ovld __cnfn convert_uint4_rtn(int4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(int4);
-uint4 __ovld __cnfn convert_uint4(int4);
-uint4 __ovld __cnfn convert_uint4_sat(int4);
-uint4 __ovld __cnfn convert_uint4_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(uint4);
-uint4 __ovld __cnfn convert_uint4_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4);
-uint4 __ovld __cnfn convert_uint4_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4);
-uint4 __ovld __cnfn convert_uint4_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4);
-uint4 __ovld __cnfn convert_uint4(uint4);
-uint4 __ovld __cnfn convert_uint4_sat(uint4);
-uint4 __ovld __cnfn convert_uint4_rte(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(long4);
-uint4 __ovld __cnfn convert_uint4_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(long4);
-uint4 __ovld __cnfn convert_uint4_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(long4);
-uint4 __ovld __cnfn convert_uint4_rtn(long4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(long4);
-uint4 __ovld __cnfn convert_uint4(long4);
-uint4 __ovld __cnfn convert_uint4_sat(long4);
-uint4 __ovld __cnfn convert_uint4_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4);
-uint4 __ovld __cnfn convert_uint4_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4);
-uint4 __ovld __cnfn convert_uint4(ulong4);
-uint4 __ovld __cnfn convert_uint4_sat(ulong4);
-uint4 __ovld __cnfn convert_uint4_rte(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(float4);
-uint4 __ovld __cnfn convert_uint4_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(float4);
-uint4 __ovld __cnfn convert_uint4_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(float4);
-uint4 __ovld __cnfn convert_uint4_rtn(float4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(float4);
-uint4 __ovld __cnfn convert_uint4(float4);
-uint4 __ovld __cnfn convert_uint4_sat(float4);
-long4 __ovld __cnfn convert_long4_rte(char4);
-long4 __ovld __cnfn convert_long4_sat_rte(char4);
-long4 __ovld __cnfn convert_long4_rtz(char4);
-long4 __ovld __cnfn convert_long4_sat_rtz(char4);
-long4 __ovld __cnfn convert_long4_rtp(char4);
-long4 __ovld __cnfn convert_long4_sat_rtp(char4);
-long4 __ovld __cnfn convert_long4_rtn(char4);
-long4 __ovld __cnfn convert_long4_sat_rtn(char4);
-long4 __ovld __cnfn convert_long4(char4);
-long4 __ovld __cnfn convert_long4_sat(char4);
-long4 __ovld __cnfn convert_long4_rte(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rte(uchar4);
-long4 __ovld __cnfn convert_long4_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uchar4);
-long4 __ovld __cnfn convert_long4_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uchar4);
-long4 __ovld __cnfn convert_long4_rtn(uchar4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uchar4);
-long4 __ovld __cnfn convert_long4(uchar4);
-long4 __ovld __cnfn convert_long4_sat(uchar4);
-long4 __ovld __cnfn convert_long4_rte(short4);
-long4 __ovld __cnfn convert_long4_sat_rte(short4);
-long4 __ovld __cnfn convert_long4_rtz(short4);
-long4 __ovld __cnfn convert_long4_sat_rtz(short4);
-long4 __ovld __cnfn convert_long4_rtp(short4);
-long4 __ovld __cnfn convert_long4_sat_rtp(short4);
-long4 __ovld __cnfn convert_long4_rtn(short4);
-long4 __ovld __cnfn convert_long4_sat_rtn(short4);
-long4 __ovld __cnfn convert_long4(short4);
-long4 __ovld __cnfn convert_long4_sat(short4);
-long4 __ovld __cnfn convert_long4_rte(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rte(ushort4);
-long4 __ovld __cnfn convert_long4_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ushort4);
-long4 __ovld __cnfn convert_long4_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ushort4);
-long4 __ovld __cnfn convert_long4_rtn(ushort4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ushort4);
-long4 __ovld __cnfn convert_long4(ushort4);
-long4 __ovld __cnfn convert_long4_sat(ushort4);
-long4 __ovld __cnfn convert_long4_rte(int4);
-long4 __ovld __cnfn convert_long4_sat_rte(int4);
-long4 __ovld __cnfn convert_long4_rtz(int4);
-long4 __ovld __cnfn convert_long4_sat_rtz(int4);
-long4 __ovld __cnfn convert_long4_rtp(int4);
-long4 __ovld __cnfn convert_long4_sat_rtp(int4);
-long4 __ovld __cnfn convert_long4_rtn(int4);
-long4 __ovld __cnfn convert_long4_sat_rtn(int4);
-long4 __ovld __cnfn convert_long4(int4);
-long4 __ovld __cnfn convert_long4_sat(int4);
-long4 __ovld __cnfn convert_long4_rte(uint4);
-long4 __ovld __cnfn convert_long4_sat_rte(uint4);
-long4 __ovld __cnfn convert_long4_rtz(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtz(uint4);
-long4 __ovld __cnfn convert_long4_rtp(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtp(uint4);
-long4 __ovld __cnfn convert_long4_rtn(uint4);
-long4 __ovld __cnfn convert_long4_sat_rtn(uint4);
-long4 __ovld __cnfn convert_long4(uint4);
-long4 __ovld __cnfn convert_long4_sat(uint4);
-long4 __ovld __cnfn convert_long4_rte(long4);
-long4 __ovld __cnfn convert_long4_sat_rte(long4);
-long4 __ovld __cnfn convert_long4_rtz(long4);
-long4 __ovld __cnfn convert_long4_sat_rtz(long4);
-long4 __ovld __cnfn convert_long4_rtp(long4);
-long4 __ovld __cnfn convert_long4_sat_rtp(long4);
-long4 __ovld __cnfn convert_long4_rtn(long4);
-long4 __ovld __cnfn convert_long4_sat_rtn(long4);
-long4 __ovld __cnfn convert_long4(long4);
-long4 __ovld __cnfn convert_long4_sat(long4);
-long4 __ovld __cnfn convert_long4_rte(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rte(ulong4);
-long4 __ovld __cnfn convert_long4_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtz(ulong4);
-long4 __ovld __cnfn convert_long4_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtp(ulong4);
-long4 __ovld __cnfn convert_long4_rtn(ulong4);
-long4 __ovld __cnfn convert_long4_sat_rtn(ulong4);
-long4 __ovld __cnfn convert_long4(ulong4);
-long4 __ovld __cnfn convert_long4_sat(ulong4);
-long4 __ovld __cnfn convert_long4_rte(float4);
-long4 __ovld __cnfn convert_long4_sat_rte(float4);
-long4 __ovld __cnfn convert_long4_rtz(float4);
-long4 __ovld __cnfn convert_long4_sat_rtz(float4);
-long4 __ovld __cnfn convert_long4_rtp(float4);
-long4 __ovld __cnfn convert_long4_sat_rtp(float4);
-long4 __ovld __cnfn convert_long4_rtn(float4);
-long4 __ovld __cnfn convert_long4_sat_rtn(float4);
-long4 __ovld __cnfn convert_long4(float4);
-long4 __ovld __cnfn convert_long4_sat(float4);
-ulong4 __ovld __cnfn convert_ulong4_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4);
-ulong4 __ovld __cnfn convert_ulong4(char4);
-ulong4 __ovld __cnfn convert_ulong4_sat(char4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4);
-ulong4 __ovld __cnfn convert_ulong4(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uchar4);
-ulong4 __ovld __cnfn convert_ulong4_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4);
-ulong4 __ovld __cnfn convert_ulong4(short4);
-ulong4 __ovld __cnfn convert_ulong4_sat(short4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4);
-ulong4 __ovld __cnfn convert_ulong4(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ushort4);
-ulong4 __ovld __cnfn convert_ulong4_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4);
-ulong4 __ovld __cnfn convert_ulong4(int4);
-ulong4 __ovld __cnfn convert_ulong4_sat(int4);
-ulong4 __ovld __cnfn convert_ulong4_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4);
-ulong4 __ovld __cnfn convert_ulong4(uint4);
-ulong4 __ovld __cnfn convert_ulong4_sat(uint4);
-ulong4 __ovld __cnfn convert_ulong4_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4);
-ulong4 __ovld __cnfn convert_ulong4(long4);
-ulong4 __ovld __cnfn convert_ulong4_sat(long4);
-ulong4 __ovld __cnfn convert_ulong4_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4);
-ulong4 __ovld __cnfn convert_ulong4(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_sat(ulong4);
-ulong4 __ovld __cnfn convert_ulong4_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4);
-ulong4 __ovld __cnfn convert_ulong4(float4);
-ulong4 __ovld __cnfn convert_ulong4_sat(float4);
-float4 __ovld __cnfn convert_float4_rte(char4);
-float4 __ovld __cnfn convert_float4_rtz(char4);
-float4 __ovld __cnfn convert_float4_rtp(char4);
-float4 __ovld __cnfn convert_float4_rtn(char4);
-float4 __ovld __cnfn convert_float4(char4);
-float4 __ovld __cnfn convert_float4_rte(uchar4);
-float4 __ovld __cnfn convert_float4_rtz(uchar4);
-float4 __ovld __cnfn convert_float4_rtp(uchar4);
-float4 __ovld __cnfn convert_float4_rtn(uchar4);
-float4 __ovld __cnfn convert_float4(uchar4);
-float4 __ovld __cnfn convert_float4_rte(short4);
-float4 __ovld __cnfn convert_float4_rtz(short4);
-float4 __ovld __cnfn convert_float4_rtp(short4);
-float4 __ovld __cnfn convert_float4_rtn(short4);
-float4 __ovld __cnfn convert_float4(short4);
-float4 __ovld __cnfn convert_float4_rte(ushort4);
-float4 __ovld __cnfn convert_float4_rtz(ushort4);
-float4 __ovld __cnfn convert_float4_rtp(ushort4);
-float4 __ovld __cnfn convert_float4_rtn(ushort4);
-float4 __ovld __cnfn convert_float4(ushort4);
-float4 __ovld __cnfn convert_float4_rte(int4);
-float4 __ovld __cnfn convert_float4_rtz(int4);
-float4 __ovld __cnfn convert_float4_rtp(int4);
-float4 __ovld __cnfn convert_float4_rtn(int4);
-float4 __ovld __cnfn convert_float4(int4);
-float4 __ovld __cnfn convert_float4_rte(uint4);
-float4 __ovld __cnfn convert_float4_rtz(uint4);
-float4 __ovld __cnfn convert_float4_rtp(uint4);
-float4 __ovld __cnfn convert_float4_rtn(uint4);
-float4 __ovld __cnfn convert_float4(uint4);
-float4 __ovld __cnfn convert_float4_rte(long4);
-float4 __ovld __cnfn convert_float4_rtz(long4);
-float4 __ovld __cnfn convert_float4_rtp(long4);
-float4 __ovld __cnfn convert_float4_rtn(long4);
-float4 __ovld __cnfn convert_float4(long4);
-float4 __ovld __cnfn convert_float4_rte(ulong4);
-float4 __ovld __cnfn convert_float4_rtz(ulong4);
-float4 __ovld __cnfn convert_float4_rtp(ulong4);
-float4 __ovld __cnfn convert_float4_rtn(ulong4);
-float4 __ovld __cnfn convert_float4(ulong4);
-float4 __ovld __cnfn convert_float4_rte(float4);
-float4 __ovld __cnfn convert_float4_rtz(float4);
-float4 __ovld __cnfn convert_float4_rtp(float4);
-float4 __ovld __cnfn convert_float4_rtn(float4);
-float4 __ovld __cnfn convert_float4(float4);
-char8 __ovld __cnfn convert_char8_rte(char8);
-char8 __ovld __cnfn convert_char8_sat_rte(char8);
-char8 __ovld __cnfn convert_char8_rtz(char8);
-char8 __ovld __cnfn convert_char8_sat_rtz(char8);
-char8 __ovld __cnfn convert_char8_rtp(char8);
-char8 __ovld __cnfn convert_char8_sat_rtp(char8);
-char8 __ovld __cnfn convert_char8_rtn(char8);
-char8 __ovld __cnfn convert_char8_sat_rtn(char8);
-char8 __ovld __cnfn convert_char8(char8);
-char8 __ovld __cnfn convert_char8_sat(char8);
-char8 __ovld __cnfn convert_char8_rte(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rte(uchar8);
-char8 __ovld __cnfn convert_char8_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uchar8);
-char8 __ovld __cnfn convert_char8_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uchar8);
-char8 __ovld __cnfn convert_char8_rtn(uchar8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uchar8);
-char8 __ovld __cnfn convert_char8(uchar8);
-char8 __ovld __cnfn convert_char8_sat(uchar8);
-char8 __ovld __cnfn convert_char8_rte(short8);
-char8 __ovld __cnfn convert_char8_sat_rte(short8);
-char8 __ovld __cnfn convert_char8_rtz(short8);
-char8 __ovld __cnfn convert_char8_sat_rtz(short8);
-char8 __ovld __cnfn convert_char8_rtp(short8);
-char8 __ovld __cnfn convert_char8_sat_rtp(short8);
-char8 __ovld __cnfn convert_char8_rtn(short8);
-char8 __ovld __cnfn convert_char8_sat_rtn(short8);
-char8 __ovld __cnfn convert_char8(short8);
-char8 __ovld __cnfn convert_char8_sat(short8);
-char8 __ovld __cnfn convert_char8_rte(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rte(ushort8);
-char8 __ovld __cnfn convert_char8_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ushort8);
-char8 __ovld __cnfn convert_char8_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ushort8);
-char8 __ovld __cnfn convert_char8_rtn(ushort8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ushort8);
-char8 __ovld __cnfn convert_char8(ushort8);
-char8 __ovld __cnfn convert_char8_sat(ushort8);
-char8 __ovld __cnfn convert_char8_rte(int8);
-char8 __ovld __cnfn convert_char8_sat_rte(int8);
-char8 __ovld __cnfn convert_char8_rtz(int8);
-char8 __ovld __cnfn convert_char8_sat_rtz(int8);
-char8 __ovld __cnfn convert_char8_rtp(int8);
-char8 __ovld __cnfn convert_char8_sat_rtp(int8);
-char8 __ovld __cnfn convert_char8_rtn(int8);
-char8 __ovld __cnfn convert_char8_sat_rtn(int8);
-char8 __ovld __cnfn convert_char8(int8);
-char8 __ovld __cnfn convert_char8_sat(int8);
-char8 __ovld __cnfn convert_char8_rte(uint8);
-char8 __ovld __cnfn convert_char8_sat_rte(uint8);
-char8 __ovld __cnfn convert_char8_rtz(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtz(uint8);
-char8 __ovld __cnfn convert_char8_rtp(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtp(uint8);
-char8 __ovld __cnfn convert_char8_rtn(uint8);
-char8 __ovld __cnfn convert_char8_sat_rtn(uint8);
-char8 __ovld __cnfn convert_char8(uint8);
-char8 __ovld __cnfn convert_char8_sat(uint8);
-char8 __ovld __cnfn convert_char8_rte(long8);
-char8 __ovld __cnfn convert_char8_sat_rte(long8);
-char8 __ovld __cnfn convert_char8_rtz(long8);
-char8 __ovld __cnfn convert_char8_sat_rtz(long8);
-char8 __ovld __cnfn convert_char8_rtp(long8);
-char8 __ovld __cnfn convert_char8_sat_rtp(long8);
-char8 __ovld __cnfn convert_char8_rtn(long8);
-char8 __ovld __cnfn convert_char8_sat_rtn(long8);
-char8 __ovld __cnfn convert_char8(long8);
-char8 __ovld __cnfn convert_char8_sat(long8);
-char8 __ovld __cnfn convert_char8_rte(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rte(ulong8);
-char8 __ovld __cnfn convert_char8_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtz(ulong8);
-char8 __ovld __cnfn convert_char8_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtp(ulong8);
-char8 __ovld __cnfn convert_char8_rtn(ulong8);
-char8 __ovld __cnfn convert_char8_sat_rtn(ulong8);
-char8 __ovld __cnfn convert_char8(ulong8);
-char8 __ovld __cnfn convert_char8_sat(ulong8);
-char8 __ovld __cnfn convert_char8_rte(float8);
-char8 __ovld __cnfn convert_char8_sat_rte(float8);
-char8 __ovld __cnfn convert_char8_rtz(float8);
-char8 __ovld __cnfn convert_char8_sat_rtz(float8);
-char8 __ovld __cnfn convert_char8_rtp(float8);
-char8 __ovld __cnfn convert_char8_sat_rtp(float8);
-char8 __ovld __cnfn convert_char8_rtn(float8);
-char8 __ovld __cnfn convert_char8_sat_rtn(float8);
-char8 __ovld __cnfn convert_char8(float8);
-char8 __ovld __cnfn convert_char8_sat(float8);
-uchar8 __ovld __cnfn convert_uchar8_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8);
-uchar8 __ovld __cnfn convert_uchar8(char8);
-uchar8 __ovld __cnfn convert_uchar8_sat(char8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8);
-uchar8 __ovld __cnfn convert_uchar8(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uchar8);
-uchar8 __ovld __cnfn convert_uchar8_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8);
-uchar8 __ovld __cnfn convert_uchar8(short8);
-uchar8 __ovld __cnfn convert_uchar8_sat(short8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8);
-uchar8 __ovld __cnfn convert_uchar8(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ushort8);
-uchar8 __ovld __cnfn convert_uchar8_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8);
-uchar8 __ovld __cnfn convert_uchar8(int8);
-uchar8 __ovld __cnfn convert_uchar8_sat(int8);
-uchar8 __ovld __cnfn convert_uchar8_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8);
-uchar8 __ovld __cnfn convert_uchar8(uint8);
-uchar8 __ovld __cnfn convert_uchar8_sat(uint8);
-uchar8 __ovld __cnfn convert_uchar8_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8);
-uchar8 __ovld __cnfn convert_uchar8(long8);
-uchar8 __ovld __cnfn convert_uchar8_sat(long8);
-uchar8 __ovld __cnfn convert_uchar8_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8);
-uchar8 __ovld __cnfn convert_uchar8(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_sat(ulong8);
-uchar8 __ovld __cnfn convert_uchar8_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8);
-uchar8 __ovld __cnfn convert_uchar8(float8);
-uchar8 __ovld __cnfn convert_uchar8_sat(float8);
-short8 __ovld __cnfn convert_short8_rte(char8);
-short8 __ovld __cnfn convert_short8_sat_rte(char8);
-short8 __ovld __cnfn convert_short8_rtz(char8);
-short8 __ovld __cnfn convert_short8_sat_rtz(char8);
-short8 __ovld __cnfn convert_short8_rtp(char8);
-short8 __ovld __cnfn convert_short8_sat_rtp(char8);
-short8 __ovld __cnfn convert_short8_rtn(char8);
-short8 __ovld __cnfn convert_short8_sat_rtn(char8);
-short8 __ovld __cnfn convert_short8(char8);
-short8 __ovld __cnfn convert_short8_sat(char8);
-short8 __ovld __cnfn convert_short8_rte(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rte(uchar8);
-short8 __ovld __cnfn convert_short8_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uchar8);
-short8 __ovld __cnfn convert_short8_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uchar8);
-short8 __ovld __cnfn convert_short8_rtn(uchar8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uchar8);
-short8 __ovld __cnfn convert_short8(uchar8);
-short8 __ovld __cnfn convert_short8_sat(uchar8);
-short8 __ovld __cnfn convert_short8_rte(short8);
-short8 __ovld __cnfn convert_short8_sat_rte(short8);
-short8 __ovld __cnfn convert_short8_rtz(short8);
-short8 __ovld __cnfn convert_short8_sat_rtz(short8);
-short8 __ovld __cnfn convert_short8_rtp(short8);
-short8 __ovld __cnfn convert_short8_sat_rtp(short8);
-short8 __ovld __cnfn convert_short8_rtn(short8);
-short8 __ovld __cnfn convert_short8_sat_rtn(short8);
-short8 __ovld __cnfn convert_short8(short8);
-short8 __ovld __cnfn convert_short8_sat(short8);
-short8 __ovld __cnfn convert_short8_rte(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rte(ushort8);
-short8 __ovld __cnfn convert_short8_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ushort8);
-short8 __ovld __cnfn convert_short8_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ushort8);
-short8 __ovld __cnfn convert_short8_rtn(ushort8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ushort8);
-short8 __ovld __cnfn convert_short8(ushort8);
-short8 __ovld __cnfn convert_short8_sat(ushort8);
-short8 __ovld __cnfn convert_short8_rte(int8);
-short8 __ovld __cnfn convert_short8_sat_rte(int8);
-short8 __ovld __cnfn convert_short8_rtz(int8);
-short8 __ovld __cnfn convert_short8_sat_rtz(int8);
-short8 __ovld __cnfn convert_short8_rtp(int8);
-short8 __ovld __cnfn convert_short8_sat_rtp(int8);
-short8 __ovld __cnfn convert_short8_rtn(int8);
-short8 __ovld __cnfn convert_short8_sat_rtn(int8);
-short8 __ovld __cnfn convert_short8(int8);
-short8 __ovld __cnfn convert_short8_sat(int8);
-short8 __ovld __cnfn convert_short8_rte(uint8);
-short8 __ovld __cnfn convert_short8_sat_rte(uint8);
-short8 __ovld __cnfn convert_short8_rtz(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtz(uint8);
-short8 __ovld __cnfn convert_short8_rtp(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtp(uint8);
-short8 __ovld __cnfn convert_short8_rtn(uint8);
-short8 __ovld __cnfn convert_short8_sat_rtn(uint8);
-short8 __ovld __cnfn convert_short8(uint8);
-short8 __ovld __cnfn convert_short8_sat(uint8);
-short8 __ovld __cnfn convert_short8_rte(long8);
-short8 __ovld __cnfn convert_short8_sat_rte(long8);
-short8 __ovld __cnfn convert_short8_rtz(long8);
-short8 __ovld __cnfn convert_short8_sat_rtz(long8);
-short8 __ovld __cnfn convert_short8_rtp(long8);
-short8 __ovld __cnfn convert_short8_sat_rtp(long8);
-short8 __ovld __cnfn convert_short8_rtn(long8);
-short8 __ovld __cnfn convert_short8_sat_rtn(long8);
-short8 __ovld __cnfn convert_short8(long8);
-short8 __ovld __cnfn convert_short8_sat(long8);
-short8 __ovld __cnfn convert_short8_rte(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rte(ulong8);
-short8 __ovld __cnfn convert_short8_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtz(ulong8);
-short8 __ovld __cnfn convert_short8_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtp(ulong8);
-short8 __ovld __cnfn convert_short8_rtn(ulong8);
-short8 __ovld __cnfn convert_short8_sat_rtn(ulong8);
-short8 __ovld __cnfn convert_short8(ulong8);
-short8 __ovld __cnfn convert_short8_sat(ulong8);
-short8 __ovld __cnfn convert_short8_rte(float8);
-short8 __ovld __cnfn convert_short8_sat_rte(float8);
-short8 __ovld __cnfn convert_short8_rtz(float8);
-short8 __ovld __cnfn convert_short8_sat_rtz(float8);
-short8 __ovld __cnfn convert_short8_rtp(float8);
-short8 __ovld __cnfn convert_short8_sat_rtp(float8);
-short8 __ovld __cnfn convert_short8_rtn(float8);
-short8 __ovld __cnfn convert_short8_sat_rtn(float8);
-short8 __ovld __cnfn convert_short8(float8);
-short8 __ovld __cnfn convert_short8_sat(float8);
-ushort8 __ovld __cnfn convert_ushort8_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8);
-ushort8 __ovld __cnfn convert_ushort8(char8);
-ushort8 __ovld __cnfn convert_ushort8_sat(char8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8);
-ushort8 __ovld __cnfn convert_ushort8(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uchar8);
-ushort8 __ovld __cnfn convert_ushort8_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8);
-ushort8 __ovld __cnfn convert_ushort8(short8);
-ushort8 __ovld __cnfn convert_ushort8_sat(short8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8);
-ushort8 __ovld __cnfn convert_ushort8(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ushort8);
-ushort8 __ovld __cnfn convert_ushort8_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8);
-ushort8 __ovld __cnfn convert_ushort8(int8);
-ushort8 __ovld __cnfn convert_ushort8_sat(int8);
-ushort8 __ovld __cnfn convert_ushort8_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8);
-ushort8 __ovld __cnfn convert_ushort8(uint8);
-ushort8 __ovld __cnfn convert_ushort8_sat(uint8);
-ushort8 __ovld __cnfn convert_ushort8_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8);
-ushort8 __ovld __cnfn convert_ushort8(long8);
-ushort8 __ovld __cnfn convert_ushort8_sat(long8);
-ushort8 __ovld __cnfn convert_ushort8_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8);
-ushort8 __ovld __cnfn convert_ushort8(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_sat(ulong8);
-ushort8 __ovld __cnfn convert_ushort8_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8);
-ushort8 __ovld __cnfn convert_ushort8(float8);
-ushort8 __ovld __cnfn convert_ushort8_sat(float8);
-int8 __ovld __cnfn convert_int8_rte(char8);
-int8 __ovld __cnfn convert_int8_sat_rte(char8);
-int8 __ovld __cnfn convert_int8_rtz(char8);
-int8 __ovld __cnfn convert_int8_sat_rtz(char8);
-int8 __ovld __cnfn convert_int8_rtp(char8);
-int8 __ovld __cnfn convert_int8_sat_rtp(char8);
-int8 __ovld __cnfn convert_int8_rtn(char8);
-int8 __ovld __cnfn convert_int8_sat_rtn(char8);
-int8 __ovld __cnfn convert_int8(char8);
-int8 __ovld __cnfn convert_int8_sat(char8);
-int8 __ovld __cnfn convert_int8_rte(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rte(uchar8);
-int8 __ovld __cnfn convert_int8_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uchar8);
-int8 __ovld __cnfn convert_int8_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uchar8);
-int8 __ovld __cnfn convert_int8_rtn(uchar8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uchar8);
-int8 __ovld __cnfn convert_int8(uchar8);
-int8 __ovld __cnfn convert_int8_sat(uchar8);
-int8 __ovld __cnfn convert_int8_rte(short8);
-int8 __ovld __cnfn convert_int8_sat_rte(short8);
-int8 __ovld __cnfn convert_int8_rtz(short8);
-int8 __ovld __cnfn convert_int8_sat_rtz(short8);
-int8 __ovld __cnfn convert_int8_rtp(short8);
-int8 __ovld __cnfn convert_int8_sat_rtp(short8);
-int8 __ovld __cnfn convert_int8_rtn(short8);
-int8 __ovld __cnfn convert_int8_sat_rtn(short8);
-int8 __ovld __cnfn convert_int8(short8);
-int8 __ovld __cnfn convert_int8_sat(short8);
-int8 __ovld __cnfn convert_int8_rte(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rte(ushort8);
-int8 __ovld __cnfn convert_int8_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ushort8);
-int8 __ovld __cnfn convert_int8_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ushort8);
-int8 __ovld __cnfn convert_int8_rtn(ushort8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ushort8);
-int8 __ovld __cnfn convert_int8(ushort8);
-int8 __ovld __cnfn convert_int8_sat(ushort8);
-int8 __ovld __cnfn convert_int8_rte(int8);
-int8 __ovld __cnfn convert_int8_sat_rte(int8);
-int8 __ovld __cnfn convert_int8_rtz(int8);
-int8 __ovld __cnfn convert_int8_sat_rtz(int8);
-int8 __ovld __cnfn convert_int8_rtp(int8);
-int8 __ovld __cnfn convert_int8_sat_rtp(int8);
-int8 __ovld __cnfn convert_int8_rtn(int8);
-int8 __ovld __cnfn convert_int8_sat_rtn(int8);
-int8 __ovld __cnfn convert_int8(int8);
-int8 __ovld __cnfn convert_int8_sat(int8);
-int8 __ovld __cnfn convert_int8_rte(uint8);
-int8 __ovld __cnfn convert_int8_sat_rte(uint8);
-int8 __ovld __cnfn convert_int8_rtz(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtz(uint8);
-int8 __ovld __cnfn convert_int8_rtp(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtp(uint8);
-int8 __ovld __cnfn convert_int8_rtn(uint8);
-int8 __ovld __cnfn convert_int8_sat_rtn(uint8);
-int8 __ovld __cnfn convert_int8(uint8);
-int8 __ovld __cnfn convert_int8_sat(uint8);
-int8 __ovld __cnfn convert_int8_rte(long8);
-int8 __ovld __cnfn convert_int8_sat_rte(long8);
-int8 __ovld __cnfn convert_int8_rtz(long8);
-int8 __ovld __cnfn convert_int8_sat_rtz(long8);
-int8 __ovld __cnfn convert_int8_rtp(long8);
-int8 __ovld __cnfn convert_int8_sat_rtp(long8);
-int8 __ovld __cnfn convert_int8_rtn(long8);
-int8 __ovld __cnfn convert_int8_sat_rtn(long8);
-int8 __ovld __cnfn convert_int8(long8);
-int8 __ovld __cnfn convert_int8_sat(long8);
-int8 __ovld __cnfn convert_int8_rte(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rte(ulong8);
-int8 __ovld __cnfn convert_int8_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtz(ulong8);
-int8 __ovld __cnfn convert_int8_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtp(ulong8);
-int8 __ovld __cnfn convert_int8_rtn(ulong8);
-int8 __ovld __cnfn convert_int8_sat_rtn(ulong8);
-int8 __ovld __cnfn convert_int8(ulong8);
-int8 __ovld __cnfn convert_int8_sat(ulong8);
-int8 __ovld __cnfn convert_int8_rte(float8);
-int8 __ovld __cnfn convert_int8_sat_rte(float8);
-int8 __ovld __cnfn convert_int8_rtz(float8);
-int8 __ovld __cnfn convert_int8_sat_rtz(float8);
-int8 __ovld __cnfn convert_int8_rtp(float8);
-int8 __ovld __cnfn convert_int8_sat_rtp(float8);
-int8 __ovld __cnfn convert_int8_rtn(float8);
-int8 __ovld __cnfn convert_int8_sat_rtn(float8);
-int8 __ovld __cnfn convert_int8(float8);
-int8 __ovld __cnfn convert_int8_sat(float8);
-uint8 __ovld __cnfn convert_uint8_rte(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(char8);
-uint8 __ovld __cnfn convert_uint8_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(char8);
-uint8 __ovld __cnfn convert_uint8_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(char8);
-uint8 __ovld __cnfn convert_uint8_rtn(char8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(char8);
-uint8 __ovld __cnfn convert_uint8(char8);
-uint8 __ovld __cnfn convert_uint8_sat(char8);
-uint8 __ovld __cnfn convert_uint8_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8);
-uint8 __ovld __cnfn convert_uint8_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8);
-uint8 __ovld __cnfn convert_uint8(uchar8);
-uint8 __ovld __cnfn convert_uint8_sat(uchar8);
-uint8 __ovld __cnfn convert_uint8_rte(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(short8);
-uint8 __ovld __cnfn convert_uint8_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(short8);
-uint8 __ovld __cnfn convert_uint8_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(short8);
-uint8 __ovld __cnfn convert_uint8_rtn(short8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(short8);
-uint8 __ovld __cnfn convert_uint8(short8);
-uint8 __ovld __cnfn convert_uint8_sat(short8);
-uint8 __ovld __cnfn convert_uint8_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8);
-uint8 __ovld __cnfn convert_uint8_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8);
-uint8 __ovld __cnfn convert_uint8(ushort8);
-uint8 __ovld __cnfn convert_uint8_sat(ushort8);
-uint8 __ovld __cnfn convert_uint8_rte(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(int8);
-uint8 __ovld __cnfn convert_uint8_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(int8);
-uint8 __ovld __cnfn convert_uint8_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(int8);
-uint8 __ovld __cnfn convert_uint8_rtn(int8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(int8);
-uint8 __ovld __cnfn convert_uint8(int8);
-uint8 __ovld __cnfn convert_uint8_sat(int8);
-uint8 __ovld __cnfn convert_uint8_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(uint8);
-uint8 __ovld __cnfn convert_uint8_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8);
-uint8 __ovld __cnfn convert_uint8_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8);
-uint8 __ovld __cnfn convert_uint8_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8);
-uint8 __ovld __cnfn convert_uint8(uint8);
-uint8 __ovld __cnfn convert_uint8_sat(uint8);
-uint8 __ovld __cnfn convert_uint8_rte(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(long8);
-uint8 __ovld __cnfn convert_uint8_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(long8);
-uint8 __ovld __cnfn convert_uint8_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(long8);
-uint8 __ovld __cnfn convert_uint8_rtn(long8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(long8);
-uint8 __ovld __cnfn convert_uint8(long8);
-uint8 __ovld __cnfn convert_uint8_sat(long8);
-uint8 __ovld __cnfn convert_uint8_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8);
-uint8 __ovld __cnfn convert_uint8_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8);
-uint8 __ovld __cnfn convert_uint8(ulong8);
-uint8 __ovld __cnfn convert_uint8_sat(ulong8);
-uint8 __ovld __cnfn convert_uint8_rte(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(float8);
-uint8 __ovld __cnfn convert_uint8_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(float8);
-uint8 __ovld __cnfn convert_uint8_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(float8);
-uint8 __ovld __cnfn convert_uint8_rtn(float8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(float8);
-uint8 __ovld __cnfn convert_uint8(float8);
-uint8 __ovld __cnfn convert_uint8_sat(float8);
-long8 __ovld __cnfn convert_long8_rte(char8);
-long8 __ovld __cnfn convert_long8_sat_rte(char8);
-long8 __ovld __cnfn convert_long8_rtz(char8);
-long8 __ovld __cnfn convert_long8_sat_rtz(char8);
-long8 __ovld __cnfn convert_long8_rtp(char8);
-long8 __ovld __cnfn convert_long8_sat_rtp(char8);
-long8 __ovld __cnfn convert_long8_rtn(char8);
-long8 __ovld __cnfn convert_long8_sat_rtn(char8);
-long8 __ovld __cnfn convert_long8(char8);
-long8 __ovld __cnfn convert_long8_sat(char8);
-long8 __ovld __cnfn convert_long8_rte(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rte(uchar8);
-long8 __ovld __cnfn convert_long8_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uchar8);
-long8 __ovld __cnfn convert_long8_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uchar8);
-long8 __ovld __cnfn convert_long8_rtn(uchar8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uchar8);
-long8 __ovld __cnfn convert_long8(uchar8);
-long8 __ovld __cnfn convert_long8_sat(uchar8);
-long8 __ovld __cnfn convert_long8_rte(short8);
-long8 __ovld __cnfn convert_long8_sat_rte(short8);
-long8 __ovld __cnfn convert_long8_rtz(short8);
-long8 __ovld __cnfn convert_long8_sat_rtz(short8);
-long8 __ovld __cnfn convert_long8_rtp(short8);
-long8 __ovld __cnfn convert_long8_sat_rtp(short8);
-long8 __ovld __cnfn convert_long8_rtn(short8);
-long8 __ovld __cnfn convert_long8_sat_rtn(short8);
-long8 __ovld __cnfn convert_long8(short8);
-long8 __ovld __cnfn convert_long8_sat(short8);
-long8 __ovld __cnfn convert_long8_rte(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rte(ushort8);
-long8 __ovld __cnfn convert_long8_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ushort8);
-long8 __ovld __cnfn convert_long8_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ushort8);
-long8 __ovld __cnfn convert_long8_rtn(ushort8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ushort8);
-long8 __ovld __cnfn convert_long8(ushort8);
-long8 __ovld __cnfn convert_long8_sat(ushort8);
-long8 __ovld __cnfn convert_long8_rte(int8);
-long8 __ovld __cnfn convert_long8_sat_rte(int8);
-long8 __ovld __cnfn convert_long8_rtz(int8);
-long8 __ovld __cnfn convert_long8_sat_rtz(int8);
-long8 __ovld __cnfn convert_long8_rtp(int8);
-long8 __ovld __cnfn convert_long8_sat_rtp(int8);
-long8 __ovld __cnfn convert_long8_rtn(int8);
-long8 __ovld __cnfn convert_long8_sat_rtn(int8);
-long8 __ovld __cnfn convert_long8(int8);
-long8 __ovld __cnfn convert_long8_sat(int8);
-long8 __ovld __cnfn convert_long8_rte(uint8);
-long8 __ovld __cnfn convert_long8_sat_rte(uint8);
-long8 __ovld __cnfn convert_long8_rtz(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtz(uint8);
-long8 __ovld __cnfn convert_long8_rtp(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtp(uint8);
-long8 __ovld __cnfn convert_long8_rtn(uint8);
-long8 __ovld __cnfn convert_long8_sat_rtn(uint8);
-long8 __ovld __cnfn convert_long8(uint8);
-long8 __ovld __cnfn convert_long8_sat(uint8);
-long8 __ovld __cnfn convert_long8_rte(long8);
-long8 __ovld __cnfn convert_long8_sat_rte(long8);
-long8 __ovld __cnfn convert_long8_rtz(long8);
-long8 __ovld __cnfn convert_long8_sat_rtz(long8);
-long8 __ovld __cnfn convert_long8_rtp(long8);
-long8 __ovld __cnfn convert_long8_sat_rtp(long8);
-long8 __ovld __cnfn convert_long8_rtn(long8);
-long8 __ovld __cnfn convert_long8_sat_rtn(long8);
-long8 __ovld __cnfn convert_long8(long8);
-long8 __ovld __cnfn convert_long8_sat(long8);
-long8 __ovld __cnfn convert_long8_rte(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rte(ulong8);
-long8 __ovld __cnfn convert_long8_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtz(ulong8);
-long8 __ovld __cnfn convert_long8_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtp(ulong8);
-long8 __ovld __cnfn convert_long8_rtn(ulong8);
-long8 __ovld __cnfn convert_long8_sat_rtn(ulong8);
-long8 __ovld __cnfn convert_long8(ulong8);
-long8 __ovld __cnfn convert_long8_sat(ulong8);
-long8 __ovld __cnfn convert_long8_rte(float8);
-long8 __ovld __cnfn convert_long8_sat_rte(float8);
-long8 __ovld __cnfn convert_long8_rtz(float8);
-long8 __ovld __cnfn convert_long8_sat_rtz(float8);
-long8 __ovld __cnfn convert_long8_rtp(float8);
-long8 __ovld __cnfn convert_long8_sat_rtp(float8);
-long8 __ovld __cnfn convert_long8_rtn(float8);
-long8 __ovld __cnfn convert_long8_sat_rtn(float8);
-long8 __ovld __cnfn convert_long8(float8);
-long8 __ovld __cnfn convert_long8_sat(float8);
-ulong8 __ovld __cnfn convert_ulong8_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8);
-ulong8 __ovld __cnfn convert_ulong8(char8);
-ulong8 __ovld __cnfn convert_ulong8_sat(char8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8);
-ulong8 __ovld __cnfn convert_ulong8(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uchar8);
-ulong8 __ovld __cnfn convert_ulong8_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8);
-ulong8 __ovld __cnfn convert_ulong8(short8);
-ulong8 __ovld __cnfn convert_ulong8_sat(short8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8);
-ulong8 __ovld __cnfn convert_ulong8(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ushort8);
-ulong8 __ovld __cnfn convert_ulong8_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8);
-ulong8 __ovld __cnfn convert_ulong8(int8);
-ulong8 __ovld __cnfn convert_ulong8_sat(int8);
-ulong8 __ovld __cnfn convert_ulong8_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8);
-ulong8 __ovld __cnfn convert_ulong8(uint8);
-ulong8 __ovld __cnfn convert_ulong8_sat(uint8);
-ulong8 __ovld __cnfn convert_ulong8_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8);
-ulong8 __ovld __cnfn convert_ulong8(long8);
-ulong8 __ovld __cnfn convert_ulong8_sat(long8);
-ulong8 __ovld __cnfn convert_ulong8_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8);
-ulong8 __ovld __cnfn convert_ulong8(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_sat(ulong8);
-ulong8 __ovld __cnfn convert_ulong8_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8);
-ulong8 __ovld __cnfn convert_ulong8(float8);
-ulong8 __ovld __cnfn convert_ulong8_sat(float8);
-float8 __ovld __cnfn convert_float8_rte(char8);
-float8 __ovld __cnfn convert_float8_rtz(char8);
-float8 __ovld __cnfn convert_float8_rtp(char8);
-float8 __ovld __cnfn convert_float8_rtn(char8);
-float8 __ovld __cnfn convert_float8(char8);
-float8 __ovld __cnfn convert_float8_rte(uchar8);
-float8 __ovld __cnfn convert_float8_rtz(uchar8);
-float8 __ovld __cnfn convert_float8_rtp(uchar8);
-float8 __ovld __cnfn convert_float8_rtn(uchar8);
-float8 __ovld __cnfn convert_float8(uchar8);
-float8 __ovld __cnfn convert_float8_rte(short8);
-float8 __ovld __cnfn convert_float8_rtz(short8);
-float8 __ovld __cnfn convert_float8_rtp(short8);
-float8 __ovld __cnfn convert_float8_rtn(short8);
-float8 __ovld __cnfn convert_float8(short8);
-float8 __ovld __cnfn convert_float8_rte(ushort8);
-float8 __ovld __cnfn convert_float8_rtz(ushort8);
-float8 __ovld __cnfn convert_float8_rtp(ushort8);
-float8 __ovld __cnfn convert_float8_rtn(ushort8);
-float8 __ovld __cnfn convert_float8(ushort8);
-float8 __ovld __cnfn convert_float8_rte(int8);
-float8 __ovld __cnfn convert_float8_rtz(int8);
-float8 __ovld __cnfn convert_float8_rtp(int8);
-float8 __ovld __cnfn convert_float8_rtn(int8);
-float8 __ovld __cnfn convert_float8(int8);
-float8 __ovld __cnfn convert_float8_rte(uint8);
-float8 __ovld __cnfn convert_float8_rtz(uint8);
-float8 __ovld __cnfn convert_float8_rtp(uint8);
-float8 __ovld __cnfn convert_float8_rtn(uint8);
-float8 __ovld __cnfn convert_float8(uint8);
-float8 __ovld __cnfn convert_float8_rte(long8);
-float8 __ovld __cnfn convert_float8_rtz(long8);
-float8 __ovld __cnfn convert_float8_rtp(long8);
-float8 __ovld __cnfn convert_float8_rtn(long8);
-float8 __ovld __cnfn convert_float8(long8);
-float8 __ovld __cnfn convert_float8_rte(ulong8);
-float8 __ovld __cnfn convert_float8_rtz(ulong8);
-float8 __ovld __cnfn convert_float8_rtp(ulong8);
-float8 __ovld __cnfn convert_float8_rtn(ulong8);
-float8 __ovld __cnfn convert_float8(ulong8);
-float8 __ovld __cnfn convert_float8_rte(float8);
-float8 __ovld __cnfn convert_float8_rtz(float8);
-float8 __ovld __cnfn convert_float8_rtp(float8);
-float8 __ovld __cnfn convert_float8_rtn(float8);
-float8 __ovld __cnfn convert_float8(float8);
-char16 __ovld __cnfn convert_char16_rte(char16);
-char16 __ovld __cnfn convert_char16_sat_rte(char16);
-char16 __ovld __cnfn convert_char16_rtz(char16);
-char16 __ovld __cnfn convert_char16_sat_rtz(char16);
-char16 __ovld __cnfn convert_char16_rtp(char16);
-char16 __ovld __cnfn convert_char16_sat_rtp(char16);
-char16 __ovld __cnfn convert_char16_rtn(char16);
-char16 __ovld __cnfn convert_char16_sat_rtn(char16);
-char16 __ovld __cnfn convert_char16(char16);
-char16 __ovld __cnfn convert_char16_sat(char16);
-char16 __ovld __cnfn convert_char16_rte(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rte(uchar16);
-char16 __ovld __cnfn convert_char16_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uchar16);
-char16 __ovld __cnfn convert_char16_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uchar16);
-char16 __ovld __cnfn convert_char16_rtn(uchar16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uchar16);
-char16 __ovld __cnfn convert_char16(uchar16);
-char16 __ovld __cnfn convert_char16_sat(uchar16);
-char16 __ovld __cnfn convert_char16_rte(short16);
-char16 __ovld __cnfn convert_char16_sat_rte(short16);
-char16 __ovld __cnfn convert_char16_rtz(short16);
-char16 __ovld __cnfn convert_char16_sat_rtz(short16);
-char16 __ovld __cnfn convert_char16_rtp(short16);
-char16 __ovld __cnfn convert_char16_sat_rtp(short16);
-char16 __ovld __cnfn convert_char16_rtn(short16);
-char16 __ovld __cnfn convert_char16_sat_rtn(short16);
-char16 __ovld __cnfn convert_char16(short16);
-char16 __ovld __cnfn convert_char16_sat(short16);
-char16 __ovld __cnfn convert_char16_rte(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rte(ushort16);
-char16 __ovld __cnfn convert_char16_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ushort16);
-char16 __ovld __cnfn convert_char16_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ushort16);
-char16 __ovld __cnfn convert_char16_rtn(ushort16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ushort16);
-char16 __ovld __cnfn convert_char16(ushort16);
-char16 __ovld __cnfn convert_char16_sat(ushort16);
-char16 __ovld __cnfn convert_char16_rte(int16);
-char16 __ovld __cnfn convert_char16_sat_rte(int16);
-char16 __ovld __cnfn convert_char16_rtz(int16);
-char16 __ovld __cnfn convert_char16_sat_rtz(int16);
-char16 __ovld __cnfn convert_char16_rtp(int16);
-char16 __ovld __cnfn convert_char16_sat_rtp(int16);
-char16 __ovld __cnfn convert_char16_rtn(int16);
-char16 __ovld __cnfn convert_char16_sat_rtn(int16);
-char16 __ovld __cnfn convert_char16(int16);
-char16 __ovld __cnfn convert_char16_sat(int16);
-char16 __ovld __cnfn convert_char16_rte(uint16);
-char16 __ovld __cnfn convert_char16_sat_rte(uint16);
-char16 __ovld __cnfn convert_char16_rtz(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtz(uint16);
-char16 __ovld __cnfn convert_char16_rtp(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtp(uint16);
-char16 __ovld __cnfn convert_char16_rtn(uint16);
-char16 __ovld __cnfn convert_char16_sat_rtn(uint16);
-char16 __ovld __cnfn convert_char16(uint16);
-char16 __ovld __cnfn convert_char16_sat(uint16);
-char16 __ovld __cnfn convert_char16_rte(long16);
-char16 __ovld __cnfn convert_char16_sat_rte(long16);
-char16 __ovld __cnfn convert_char16_rtz(long16);
-char16 __ovld __cnfn convert_char16_sat_rtz(long16);
-char16 __ovld __cnfn convert_char16_rtp(long16);
-char16 __ovld __cnfn convert_char16_sat_rtp(long16);
-char16 __ovld __cnfn convert_char16_rtn(long16);
-char16 __ovld __cnfn convert_char16_sat_rtn(long16);
-char16 __ovld __cnfn convert_char16(long16);
-char16 __ovld __cnfn convert_char16_sat(long16);
-char16 __ovld __cnfn convert_char16_rte(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rte(ulong16);
-char16 __ovld __cnfn convert_char16_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtz(ulong16);
-char16 __ovld __cnfn convert_char16_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtp(ulong16);
-char16 __ovld __cnfn convert_char16_rtn(ulong16);
-char16 __ovld __cnfn convert_char16_sat_rtn(ulong16);
-char16 __ovld __cnfn convert_char16(ulong16);
-char16 __ovld __cnfn convert_char16_sat(ulong16);
-char16 __ovld __cnfn convert_char16_rte(float16);
-char16 __ovld __cnfn convert_char16_sat_rte(float16);
-char16 __ovld __cnfn convert_char16_rtz(float16);
-char16 __ovld __cnfn convert_char16_sat_rtz(float16);
-char16 __ovld __cnfn convert_char16_rtp(float16);
-char16 __ovld __cnfn convert_char16_sat_rtp(float16);
-char16 __ovld __cnfn convert_char16_rtn(float16);
-char16 __ovld __cnfn convert_char16_sat_rtn(float16);
-char16 __ovld __cnfn convert_char16(float16);
-char16 __ovld __cnfn convert_char16_sat(float16);
-uchar16 __ovld __cnfn convert_uchar16_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16);
-uchar16 __ovld __cnfn convert_uchar16(char16);
-uchar16 __ovld __cnfn convert_uchar16_sat(char16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16);
-uchar16 __ovld __cnfn convert_uchar16(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uchar16);
-uchar16 __ovld __cnfn convert_uchar16_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16);
-uchar16 __ovld __cnfn convert_uchar16(short16);
-uchar16 __ovld __cnfn convert_uchar16_sat(short16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16);
-uchar16 __ovld __cnfn convert_uchar16(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ushort16);
-uchar16 __ovld __cnfn convert_uchar16_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16);
-uchar16 __ovld __cnfn convert_uchar16(int16);
-uchar16 __ovld __cnfn convert_uchar16_sat(int16);
-uchar16 __ovld __cnfn convert_uchar16_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16);
-uchar16 __ovld __cnfn convert_uchar16(uint16);
-uchar16 __ovld __cnfn convert_uchar16_sat(uint16);
-uchar16 __ovld __cnfn convert_uchar16_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16);
-uchar16 __ovld __cnfn convert_uchar16(long16);
-uchar16 __ovld __cnfn convert_uchar16_sat(long16);
-uchar16 __ovld __cnfn convert_uchar16_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16);
-uchar16 __ovld __cnfn convert_uchar16(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_sat(ulong16);
-uchar16 __ovld __cnfn convert_uchar16_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16);
-uchar16 __ovld __cnfn convert_uchar16(float16);
-uchar16 __ovld __cnfn convert_uchar16_sat(float16);
-short16 __ovld __cnfn convert_short16_rte(char16);
-short16 __ovld __cnfn convert_short16_sat_rte(char16);
-short16 __ovld __cnfn convert_short16_rtz(char16);
-short16 __ovld __cnfn convert_short16_sat_rtz(char16);
-short16 __ovld __cnfn convert_short16_rtp(char16);
-short16 __ovld __cnfn convert_short16_sat_rtp(char16);
-short16 __ovld __cnfn convert_short16_rtn(char16);
-short16 __ovld __cnfn convert_short16_sat_rtn(char16);
-short16 __ovld __cnfn convert_short16(char16);
-short16 __ovld __cnfn convert_short16_sat(char16);
-short16 __ovld __cnfn convert_short16_rte(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rte(uchar16);
-short16 __ovld __cnfn convert_short16_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uchar16);
-short16 __ovld __cnfn convert_short16_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uchar16);
-short16 __ovld __cnfn convert_short16_rtn(uchar16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uchar16);
-short16 __ovld __cnfn convert_short16(uchar16);
-short16 __ovld __cnfn convert_short16_sat(uchar16);
-short16 __ovld __cnfn convert_short16_rte(short16);
-short16 __ovld __cnfn convert_short16_sat_rte(short16);
-short16 __ovld __cnfn convert_short16_rtz(short16);
-short16 __ovld __cnfn convert_short16_sat_rtz(short16);
-short16 __ovld __cnfn convert_short16_rtp(short16);
-short16 __ovld __cnfn convert_short16_sat_rtp(short16);
-short16 __ovld __cnfn convert_short16_rtn(short16);
-short16 __ovld __cnfn convert_short16_sat_rtn(short16);
-short16 __ovld __cnfn convert_short16(short16);
-short16 __ovld __cnfn convert_short16_sat(short16);
-short16 __ovld __cnfn convert_short16_rte(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rte(ushort16);
-short16 __ovld __cnfn convert_short16_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ushort16);
-short16 __ovld __cnfn convert_short16_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ushort16);
-short16 __ovld __cnfn convert_short16_rtn(ushort16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ushort16);
-short16 __ovld __cnfn convert_short16(ushort16);
-short16 __ovld __cnfn convert_short16_sat(ushort16);
-short16 __ovld __cnfn convert_short16_rte(int16);
-short16 __ovld __cnfn convert_short16_sat_rte(int16);
-short16 __ovld __cnfn convert_short16_rtz(int16);
-short16 __ovld __cnfn convert_short16_sat_rtz(int16);
-short16 __ovld __cnfn convert_short16_rtp(int16);
-short16 __ovld __cnfn convert_short16_sat_rtp(int16);
-short16 __ovld __cnfn convert_short16_rtn(int16);
-short16 __ovld __cnfn convert_short16_sat_rtn(int16);
-short16 __ovld __cnfn convert_short16(int16);
-short16 __ovld __cnfn convert_short16_sat(int16);
-short16 __ovld __cnfn convert_short16_rte(uint16);
-short16 __ovld __cnfn convert_short16_sat_rte(uint16);
-short16 __ovld __cnfn convert_short16_rtz(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtz(uint16);
-short16 __ovld __cnfn convert_short16_rtp(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtp(uint16);
-short16 __ovld __cnfn convert_short16_rtn(uint16);
-short16 __ovld __cnfn convert_short16_sat_rtn(uint16);
-short16 __ovld __cnfn convert_short16(uint16);
-short16 __ovld __cnfn convert_short16_sat(uint16);
-short16 __ovld __cnfn convert_short16_rte(long16);
-short16 __ovld __cnfn convert_short16_sat_rte(long16);
-short16 __ovld __cnfn convert_short16_rtz(long16);
-short16 __ovld __cnfn convert_short16_sat_rtz(long16);
-short16 __ovld __cnfn convert_short16_rtp(long16);
-short16 __ovld __cnfn convert_short16_sat_rtp(long16);
-short16 __ovld __cnfn convert_short16_rtn(long16);
-short16 __ovld __cnfn convert_short16_sat_rtn(long16);
-short16 __ovld __cnfn convert_short16(long16);
-short16 __ovld __cnfn convert_short16_sat(long16);
-short16 __ovld __cnfn convert_short16_rte(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rte(ulong16);
-short16 __ovld __cnfn convert_short16_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtz(ulong16);
-short16 __ovld __cnfn convert_short16_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtp(ulong16);
-short16 __ovld __cnfn convert_short16_rtn(ulong16);
-short16 __ovld __cnfn convert_short16_sat_rtn(ulong16);
-short16 __ovld __cnfn convert_short16(ulong16);
-short16 __ovld __cnfn convert_short16_sat(ulong16);
-short16 __ovld __cnfn convert_short16_rte(float16);
-short16 __ovld __cnfn convert_short16_sat_rte(float16);
-short16 __ovld __cnfn convert_short16_rtz(float16);
-short16 __ovld __cnfn convert_short16_sat_rtz(float16);
-short16 __ovld __cnfn convert_short16_rtp(float16);
-short16 __ovld __cnfn convert_short16_sat_rtp(float16);
-short16 __ovld __cnfn convert_short16_rtn(float16);
-short16 __ovld __cnfn convert_short16_sat_rtn(float16);
-short16 __ovld __cnfn convert_short16(float16);
-short16 __ovld __cnfn convert_short16_sat(float16);
-ushort16 __ovld __cnfn convert_ushort16_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16);
-ushort16 __ovld __cnfn convert_ushort16(char16);
-ushort16 __ovld __cnfn convert_ushort16_sat(char16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16);
-ushort16 __ovld __cnfn convert_ushort16(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uchar16);
-ushort16 __ovld __cnfn convert_ushort16_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16);
-ushort16 __ovld __cnfn convert_ushort16(short16);
-ushort16 __ovld __cnfn convert_ushort16_sat(short16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16);
-ushort16 __ovld __cnfn convert_ushort16(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ushort16);
-ushort16 __ovld __cnfn convert_ushort16_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16);
-ushort16 __ovld __cnfn convert_ushort16(int16);
-ushort16 __ovld __cnfn convert_ushort16_sat(int16);
-ushort16 __ovld __cnfn convert_ushort16_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16);
-ushort16 __ovld __cnfn convert_ushort16(uint16);
-ushort16 __ovld __cnfn convert_ushort16_sat(uint16);
-ushort16 __ovld __cnfn convert_ushort16_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16);
-ushort16 __ovld __cnfn convert_ushort16(long16);
-ushort16 __ovld __cnfn convert_ushort16_sat(long16);
-ushort16 __ovld __cnfn convert_ushort16_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16);
-ushort16 __ovld __cnfn convert_ushort16(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_sat(ulong16);
-ushort16 __ovld __cnfn convert_ushort16_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16);
-ushort16 __ovld __cnfn convert_ushort16(float16);
-ushort16 __ovld __cnfn convert_ushort16_sat(float16);
-int16 __ovld __cnfn convert_int16_rte(char16);
-int16 __ovld __cnfn convert_int16_sat_rte(char16);
-int16 __ovld __cnfn convert_int16_rtz(char16);
-int16 __ovld __cnfn convert_int16_sat_rtz(char16);
-int16 __ovld __cnfn convert_int16_rtp(char16);
-int16 __ovld __cnfn convert_int16_sat_rtp(char16);
-int16 __ovld __cnfn convert_int16_rtn(char16);
-int16 __ovld __cnfn convert_int16_sat_rtn(char16);
-int16 __ovld __cnfn convert_int16(char16);
-int16 __ovld __cnfn convert_int16_sat(char16);
-int16 __ovld __cnfn convert_int16_rte(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rte(uchar16);
-int16 __ovld __cnfn convert_int16_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uchar16);
-int16 __ovld __cnfn convert_int16_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uchar16);
-int16 __ovld __cnfn convert_int16_rtn(uchar16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uchar16);
-int16 __ovld __cnfn convert_int16(uchar16);
-int16 __ovld __cnfn convert_int16_sat(uchar16);
-int16 __ovld __cnfn convert_int16_rte(short16);
-int16 __ovld __cnfn convert_int16_sat_rte(short16);
-int16 __ovld __cnfn convert_int16_rtz(short16);
-int16 __ovld __cnfn convert_int16_sat_rtz(short16);
-int16 __ovld __cnfn convert_int16_rtp(short16);
-int16 __ovld __cnfn convert_int16_sat_rtp(short16);
-int16 __ovld __cnfn convert_int16_rtn(short16);
-int16 __ovld __cnfn convert_int16_sat_rtn(short16);
-int16 __ovld __cnfn convert_int16(short16);
-int16 __ovld __cnfn convert_int16_sat(short16);
-int16 __ovld __cnfn convert_int16_rte(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rte(ushort16);
-int16 __ovld __cnfn convert_int16_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ushort16);
-int16 __ovld __cnfn convert_int16_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ushort16);
-int16 __ovld __cnfn convert_int16_rtn(ushort16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ushort16);
-int16 __ovld __cnfn convert_int16(ushort16);
-int16 __ovld __cnfn convert_int16_sat(ushort16);
-int16 __ovld __cnfn convert_int16_rte(int16);
-int16 __ovld __cnfn convert_int16_sat_rte(int16);
-int16 __ovld __cnfn convert_int16_rtz(int16);
-int16 __ovld __cnfn convert_int16_sat_rtz(int16);
-int16 __ovld __cnfn convert_int16_rtp(int16);
-int16 __ovld __cnfn convert_int16_sat_rtp(int16);
-int16 __ovld __cnfn convert_int16_rtn(int16);
-int16 __ovld __cnfn convert_int16_sat_rtn(int16);
-int16 __ovld __cnfn convert_int16(int16);
-int16 __ovld __cnfn convert_int16_sat(int16);
-int16 __ovld __cnfn convert_int16_rte(uint16);
-int16 __ovld __cnfn convert_int16_sat_rte(uint16);
-int16 __ovld __cnfn convert_int16_rtz(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtz(uint16);
-int16 __ovld __cnfn convert_int16_rtp(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtp(uint16);
-int16 __ovld __cnfn convert_int16_rtn(uint16);
-int16 __ovld __cnfn convert_int16_sat_rtn(uint16);
-int16 __ovld __cnfn convert_int16(uint16);
-int16 __ovld __cnfn convert_int16_sat(uint16);
-int16 __ovld __cnfn convert_int16_rte(long16);
-int16 __ovld __cnfn convert_int16_sat_rte(long16);
-int16 __ovld __cnfn convert_int16_rtz(long16);
-int16 __ovld __cnfn convert_int16_sat_rtz(long16);
-int16 __ovld __cnfn convert_int16_rtp(long16);
-int16 __ovld __cnfn convert_int16_sat_rtp(long16);
-int16 __ovld __cnfn convert_int16_rtn(long16);
-int16 __ovld __cnfn convert_int16_sat_rtn(long16);
-int16 __ovld __cnfn convert_int16(long16);
-int16 __ovld __cnfn convert_int16_sat(long16);
-int16 __ovld __cnfn convert_int16_rte(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rte(ulong16);
-int16 __ovld __cnfn convert_int16_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtz(ulong16);
-int16 __ovld __cnfn convert_int16_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtp(ulong16);
-int16 __ovld __cnfn convert_int16_rtn(ulong16);
-int16 __ovld __cnfn convert_int16_sat_rtn(ulong16);
-int16 __ovld __cnfn convert_int16(ulong16);
-int16 __ovld __cnfn convert_int16_sat(ulong16);
-int16 __ovld __cnfn convert_int16_rte(float16);
-int16 __ovld __cnfn convert_int16_sat_rte(float16);
-int16 __ovld __cnfn convert_int16_rtz(float16);
-int16 __ovld __cnfn convert_int16_sat_rtz(float16);
-int16 __ovld __cnfn convert_int16_rtp(float16);
-int16 __ovld __cnfn convert_int16_sat_rtp(float16);
-int16 __ovld __cnfn convert_int16_rtn(float16);
-int16 __ovld __cnfn convert_int16_sat_rtn(float16);
-int16 __ovld __cnfn convert_int16(float16);
-int16 __ovld __cnfn convert_int16_sat(float16);
-uint16 __ovld __cnfn convert_uint16_rte(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(char16);
-uint16 __ovld __cnfn convert_uint16_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(char16);
-uint16 __ovld __cnfn convert_uint16_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(char16);
-uint16 __ovld __cnfn convert_uint16_rtn(char16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(char16);
-uint16 __ovld __cnfn convert_uint16(char16);
-uint16 __ovld __cnfn convert_uint16_sat(char16);
-uint16 __ovld __cnfn convert_uint16_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16);
-uint16 __ovld __cnfn convert_uint16_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16);
-uint16 __ovld __cnfn convert_uint16(uchar16);
-uint16 __ovld __cnfn convert_uint16_sat(uchar16);
-uint16 __ovld __cnfn convert_uint16_rte(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(short16);
-uint16 __ovld __cnfn convert_uint16_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(short16);
-uint16 __ovld __cnfn convert_uint16_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(short16);
-uint16 __ovld __cnfn convert_uint16_rtn(short16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(short16);
-uint16 __ovld __cnfn convert_uint16(short16);
-uint16 __ovld __cnfn convert_uint16_sat(short16);
-uint16 __ovld __cnfn convert_uint16_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16);
-uint16 __ovld __cnfn convert_uint16_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16);
-uint16 __ovld __cnfn convert_uint16(ushort16);
-uint16 __ovld __cnfn convert_uint16_sat(ushort16);
-uint16 __ovld __cnfn convert_uint16_rte(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(int16);
-uint16 __ovld __cnfn convert_uint16_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(int16);
-uint16 __ovld __cnfn convert_uint16_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(int16);
-uint16 __ovld __cnfn convert_uint16_rtn(int16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(int16);
-uint16 __ovld __cnfn convert_uint16(int16);
-uint16 __ovld __cnfn convert_uint16_sat(int16);
-uint16 __ovld __cnfn convert_uint16_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(uint16);
-uint16 __ovld __cnfn convert_uint16_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16);
-uint16 __ovld __cnfn convert_uint16_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16);
-uint16 __ovld __cnfn convert_uint16_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16);
-uint16 __ovld __cnfn convert_uint16(uint16);
-uint16 __ovld __cnfn convert_uint16_sat(uint16);
-uint16 __ovld __cnfn convert_uint16_rte(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(long16);
-uint16 __ovld __cnfn convert_uint16_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(long16);
-uint16 __ovld __cnfn convert_uint16_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(long16);
-uint16 __ovld __cnfn convert_uint16_rtn(long16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(long16);
-uint16 __ovld __cnfn convert_uint16(long16);
-uint16 __ovld __cnfn convert_uint16_sat(long16);
-uint16 __ovld __cnfn convert_uint16_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16);
-uint16 __ovld __cnfn convert_uint16_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16);
-uint16 __ovld __cnfn convert_uint16(ulong16);
-uint16 __ovld __cnfn convert_uint16_sat(ulong16);
-uint16 __ovld __cnfn convert_uint16_rte(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(float16);
-uint16 __ovld __cnfn convert_uint16_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(float16);
-uint16 __ovld __cnfn convert_uint16_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(float16);
-uint16 __ovld __cnfn convert_uint16_rtn(float16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(float16);
-uint16 __ovld __cnfn convert_uint16(float16);
-uint16 __ovld __cnfn convert_uint16_sat(float16);
-long16 __ovld __cnfn convert_long16_rte(char16);
-long16 __ovld __cnfn convert_long16_sat_rte(char16);
-long16 __ovld __cnfn convert_long16_rtz(char16);
-long16 __ovld __cnfn convert_long16_sat_rtz(char16);
-long16 __ovld __cnfn convert_long16_rtp(char16);
-long16 __ovld __cnfn convert_long16_sat_rtp(char16);
-long16 __ovld __cnfn convert_long16_rtn(char16);
-long16 __ovld __cnfn convert_long16_sat_rtn(char16);
-long16 __ovld __cnfn convert_long16(char16);
-long16 __ovld __cnfn convert_long16_sat(char16);
-long16 __ovld __cnfn convert_long16_rte(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rte(uchar16);
-long16 __ovld __cnfn convert_long16_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uchar16);
-long16 __ovld __cnfn convert_long16_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uchar16);
-long16 __ovld __cnfn convert_long16_rtn(uchar16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uchar16);
-long16 __ovld __cnfn convert_long16(uchar16);
-long16 __ovld __cnfn convert_long16_sat(uchar16);
-long16 __ovld __cnfn convert_long16_rte(short16);
-long16 __ovld __cnfn convert_long16_sat_rte(short16);
-long16 __ovld __cnfn convert_long16_rtz(short16);
-long16 __ovld __cnfn convert_long16_sat_rtz(short16);
-long16 __ovld __cnfn convert_long16_rtp(short16);
-long16 __ovld __cnfn convert_long16_sat_rtp(short16);
-long16 __ovld __cnfn convert_long16_rtn(short16);
-long16 __ovld __cnfn convert_long16_sat_rtn(short16);
-long16 __ovld __cnfn convert_long16(short16);
-long16 __ovld __cnfn convert_long16_sat(short16);
-long16 __ovld __cnfn convert_long16_rte(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rte(ushort16);
-long16 __ovld __cnfn convert_long16_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ushort16);
-long16 __ovld __cnfn convert_long16_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ushort16);
-long16 __ovld __cnfn convert_long16_rtn(ushort16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ushort16);
-long16 __ovld __cnfn convert_long16(ushort16);
-long16 __ovld __cnfn convert_long16_sat(ushort16);
-long16 __ovld __cnfn convert_long16_rte(int16);
-long16 __ovld __cnfn convert_long16_sat_rte(int16);
-long16 __ovld __cnfn convert_long16_rtz(int16);
-long16 __ovld __cnfn convert_long16_sat_rtz(int16);
-long16 __ovld __cnfn convert_long16_rtp(int16);
-long16 __ovld __cnfn convert_long16_sat_rtp(int16);
-long16 __ovld __cnfn convert_long16_rtn(int16);
-long16 __ovld __cnfn convert_long16_sat_rtn(int16);
-long16 __ovld __cnfn convert_long16(int16);
-long16 __ovld __cnfn convert_long16_sat(int16);
-long16 __ovld __cnfn convert_long16_rte(uint16);
-long16 __ovld __cnfn convert_long16_sat_rte(uint16);
-long16 __ovld __cnfn convert_long16_rtz(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtz(uint16);
-long16 __ovld __cnfn convert_long16_rtp(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtp(uint16);
-long16 __ovld __cnfn convert_long16_rtn(uint16);
-long16 __ovld __cnfn convert_long16_sat_rtn(uint16);
-long16 __ovld __cnfn convert_long16(uint16);
-long16 __ovld __cnfn convert_long16_sat(uint16);
-long16 __ovld __cnfn convert_long16_rte(long16);
-long16 __ovld __cnfn convert_long16_sat_rte(long16);
-long16 __ovld __cnfn convert_long16_rtz(long16);
-long16 __ovld __cnfn convert_long16_sat_rtz(long16);
-long16 __ovld __cnfn convert_long16_rtp(long16);
-long16 __ovld __cnfn convert_long16_sat_rtp(long16);
-long16 __ovld __cnfn convert_long16_rtn(long16);
-long16 __ovld __cnfn convert_long16_sat_rtn(long16);
-long16 __ovld __cnfn convert_long16(long16);
-long16 __ovld __cnfn convert_long16_sat(long16);
-long16 __ovld __cnfn convert_long16_rte(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rte(ulong16);
-long16 __ovld __cnfn convert_long16_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtz(ulong16);
-long16 __ovld __cnfn convert_long16_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtp(ulong16);
-long16 __ovld __cnfn convert_long16_rtn(ulong16);
-long16 __ovld __cnfn convert_long16_sat_rtn(ulong16);
-long16 __ovld __cnfn convert_long16(ulong16);
-long16 __ovld __cnfn convert_long16_sat(ulong16);
-long16 __ovld __cnfn convert_long16_rte(float16);
-long16 __ovld __cnfn convert_long16_sat_rte(float16);
-long16 __ovld __cnfn convert_long16_rtz(float16);
-long16 __ovld __cnfn convert_long16_sat_rtz(float16);
-long16 __ovld __cnfn convert_long16_rtp(float16);
-long16 __ovld __cnfn convert_long16_sat_rtp(float16);
-long16 __ovld __cnfn convert_long16_rtn(float16);
-long16 __ovld __cnfn convert_long16_sat_rtn(float16);
-long16 __ovld __cnfn convert_long16(float16);
-long16 __ovld __cnfn convert_long16_sat(float16);
-ulong16 __ovld __cnfn convert_ulong16_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16);
-ulong16 __ovld __cnfn convert_ulong16(char16);
-ulong16 __ovld __cnfn convert_ulong16_sat(char16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16);
-ulong16 __ovld __cnfn convert_ulong16(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uchar16);
-ulong16 __ovld __cnfn convert_ulong16_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16);
-ulong16 __ovld __cnfn convert_ulong16(short16);
-ulong16 __ovld __cnfn convert_ulong16_sat(short16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16);
-ulong16 __ovld __cnfn convert_ulong16(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ushort16);
-ulong16 __ovld __cnfn convert_ulong16_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16);
-ulong16 __ovld __cnfn convert_ulong16(int16);
-ulong16 __ovld __cnfn convert_ulong16_sat(int16);
-ulong16 __ovld __cnfn convert_ulong16_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16);
-ulong16 __ovld __cnfn convert_ulong16(uint16);
-ulong16 __ovld __cnfn convert_ulong16_sat(uint16);
-ulong16 __ovld __cnfn convert_ulong16_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16);
-ulong16 __ovld __cnfn convert_ulong16(long16);
-ulong16 __ovld __cnfn convert_ulong16_sat(long16);
-ulong16 __ovld __cnfn convert_ulong16_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16);
-ulong16 __ovld __cnfn convert_ulong16(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_sat(ulong16);
-ulong16 __ovld __cnfn convert_ulong16_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16);
-ulong16 __ovld __cnfn convert_ulong16(float16);
-ulong16 __ovld __cnfn convert_ulong16_sat(float16);
-float16 __ovld __cnfn convert_float16_rte(char16);
-float16 __ovld __cnfn convert_float16_rtz(char16);
-float16 __ovld __cnfn convert_float16_rtp(char16);
-float16 __ovld __cnfn convert_float16_rtn(char16);
-float16 __ovld __cnfn convert_float16(char16);
-float16 __ovld __cnfn convert_float16_rte(uchar16);
-float16 __ovld __cnfn convert_float16_rtz(uchar16);
-float16 __ovld __cnfn convert_float16_rtp(uchar16);
-float16 __ovld __cnfn convert_float16_rtn(uchar16);
-float16 __ovld __cnfn convert_float16(uchar16);
-float16 __ovld __cnfn convert_float16_rte(short16);
-float16 __ovld __cnfn convert_float16_rtz(short16);
-float16 __ovld __cnfn convert_float16_rtp(short16);
-float16 __ovld __cnfn convert_float16_rtn(short16);
-float16 __ovld __cnfn convert_float16(short16);
-float16 __ovld __cnfn convert_float16_rte(ushort16);
-float16 __ovld __cnfn convert_float16_rtz(ushort16);
-float16 __ovld __cnfn convert_float16_rtp(ushort16);
-float16 __ovld __cnfn convert_float16_rtn(ushort16);
-float16 __ovld __cnfn convert_float16(ushort16);
-float16 __ovld __cnfn convert_float16_rte(int16);
-float16 __ovld __cnfn convert_float16_rtz(int16);
-float16 __ovld __cnfn convert_float16_rtp(int16);
-float16 __ovld __cnfn convert_float16_rtn(int16);
-float16 __ovld __cnfn convert_float16(int16);
-float16 __ovld __cnfn convert_float16_rte(uint16);
-float16 __ovld __cnfn convert_float16_rtz(uint16);
-float16 __ovld __cnfn convert_float16_rtp(uint16);
-float16 __ovld __cnfn convert_float16_rtn(uint16);
-float16 __ovld __cnfn convert_float16(uint16);
-float16 __ovld __cnfn convert_float16_rte(long16);
-float16 __ovld __cnfn convert_float16_rtz(long16);
-float16 __ovld __cnfn convert_float16_rtp(long16);
-float16 __ovld __cnfn convert_float16_rtn(long16);
-float16 __ovld __cnfn convert_float16(long16);
-float16 __ovld __cnfn convert_float16_rte(ulong16);
-float16 __ovld __cnfn convert_float16_rtz(ulong16);
-float16 __ovld __cnfn convert_float16_rtp(ulong16);
-float16 __ovld __cnfn convert_float16_rtn(ulong16);
-float16 __ovld __cnfn convert_float16(ulong16);
-float16 __ovld __cnfn convert_float16_rte(float16);
-float16 __ovld __cnfn convert_float16_rtz(float16);
-float16 __ovld __cnfn convert_float16_rtp(float16);
-float16 __ovld __cnfn convert_float16_rtn(float16);
-float16 __ovld __cnfn convert_float16(float16);
-
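The vector conversion declarations above all follow the OpenCL naming scheme convert_<destType><N>[_sat][_rte|_rtz|_rtp|_rtn](src). As a minimal usage sketch (not part of the deleted header; the kernel name and buffer arguments are invented for illustration):

// Saturating, round-to-nearest-even conversion of float4 lanes to uchar4.
__kernel void scale_to_bytes(__global const float4 *in, __global uchar4 *out) {
  size_t i = get_global_id(0);
  // _sat clamps out-of-range lanes to [0, 255] instead of wrapping;
  // _rte rounds ties to the nearest even integer.
  out[i] = convert_uchar4_sat_rte(in[i] * 255.0f);
}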
-// Conversions with double data type parameters or return value.
-
-#ifdef cl_khr_fp64
-char __ovld __cnfn convert_char(double);
-char __ovld __cnfn convert_char_rte(double);
-char __ovld __cnfn convert_char_rtn(double);
-char __ovld __cnfn convert_char_rtp(double);
-char __ovld __cnfn convert_char_rtz(double);
-char __ovld __cnfn convert_char_sat(double);
-char __ovld __cnfn convert_char_sat_rte(double);
-char __ovld __cnfn convert_char_sat_rtn(double);
-char __ovld __cnfn convert_char_sat_rtp(double);
-char __ovld __cnfn convert_char_sat_rtz(double);
-char2 __ovld __cnfn convert_char2(double2);
-char2 __ovld __cnfn convert_char2_rte(double2);
-char2 __ovld __cnfn convert_char2_rtn(double2);
-char2 __ovld __cnfn convert_char2_rtp(double2);
-char2 __ovld __cnfn convert_char2_rtz(double2);
-char2 __ovld __cnfn convert_char2_sat(double2);
-char2 __ovld __cnfn convert_char2_sat_rte(double2);
-char2 __ovld __cnfn convert_char2_sat_rtn(double2);
-char2 __ovld __cnfn convert_char2_sat_rtp(double2);
-char2 __ovld __cnfn convert_char2_sat_rtz(double2);
-char3 __ovld __cnfn convert_char3(double3);
-char3 __ovld __cnfn convert_char3_rte(double3);
-char3 __ovld __cnfn convert_char3_rtn(double3);
-char3 __ovld __cnfn convert_char3_rtp(double3);
-char3 __ovld __cnfn convert_char3_rtz(double3);
-char3 __ovld __cnfn convert_char3_sat(double3);
-char3 __ovld __cnfn convert_char3_sat_rte(double3);
-char3 __ovld __cnfn convert_char3_sat_rtn(double3);
-char3 __ovld __cnfn convert_char3_sat_rtp(double3);
-char3 __ovld __cnfn convert_char3_sat_rtz(double3);
-char4 __ovld __cnfn convert_char4(double4);
-char4 __ovld __cnfn convert_char4_rte(double4);
-char4 __ovld __cnfn convert_char4_rtn(double4);
-char4 __ovld __cnfn convert_char4_rtp(double4);
-char4 __ovld __cnfn convert_char4_rtz(double4);
-char4 __ovld __cnfn convert_char4_sat(double4);
-char4 __ovld __cnfn convert_char4_sat_rte(double4);
-char4 __ovld __cnfn convert_char4_sat_rtn(double4);
-char4 __ovld __cnfn convert_char4_sat_rtp(double4);
-char4 __ovld __cnfn convert_char4_sat_rtz(double4);
-char8 __ovld __cnfn convert_char8(double8);
-char8 __ovld __cnfn convert_char8_rte(double8);
-char8 __ovld __cnfn convert_char8_rtn(double8);
-char8 __ovld __cnfn convert_char8_rtp(double8);
-char8 __ovld __cnfn convert_char8_rtz(double8);
-char8 __ovld __cnfn convert_char8_sat(double8);
-char8 __ovld __cnfn convert_char8_sat_rte(double8);
-char8 __ovld __cnfn convert_char8_sat_rtn(double8);
-char8 __ovld __cnfn convert_char8_sat_rtp(double8);
-char8 __ovld __cnfn convert_char8_sat_rtz(double8);
-char16 __ovld __cnfn convert_char16(double16);
-char16 __ovld __cnfn convert_char16_rte(double16);
-char16 __ovld __cnfn convert_char16_rtn(double16);
-char16 __ovld __cnfn convert_char16_rtp(double16);
-char16 __ovld __cnfn convert_char16_rtz(double16);
-char16 __ovld __cnfn convert_char16_sat(double16);
-char16 __ovld __cnfn convert_char16_sat_rte(double16);
-char16 __ovld __cnfn convert_char16_sat_rtn(double16);
-char16 __ovld __cnfn convert_char16_sat_rtp(double16);
-char16 __ovld __cnfn convert_char16_sat_rtz(double16);
-
-uchar __ovld __cnfn convert_uchar(double);
-uchar __ovld __cnfn convert_uchar_rte(double);
-uchar __ovld __cnfn convert_uchar_rtn(double);
-uchar __ovld __cnfn convert_uchar_rtp(double);
-uchar __ovld __cnfn convert_uchar_rtz(double);
-uchar __ovld __cnfn convert_uchar_sat(double);
-uchar __ovld __cnfn convert_uchar_sat_rte(double);
-uchar __ovld __cnfn convert_uchar_sat_rtn(double);
-uchar __ovld __cnfn convert_uchar_sat_rtp(double);
-uchar __ovld __cnfn convert_uchar_sat_rtz(double);
-uchar2 __ovld __cnfn convert_uchar2(double2);
-uchar2 __ovld __cnfn convert_uchar2_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2);
-uchar3 __ovld __cnfn convert_uchar3(double3);
-uchar3 __ovld __cnfn convert_uchar3_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3);
-uchar4 __ovld __cnfn convert_uchar4(double4);
-uchar4 __ovld __cnfn convert_uchar4_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4);
-uchar8 __ovld __cnfn convert_uchar8(double8);
-uchar8 __ovld __cnfn convert_uchar8_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8);
-uchar16 __ovld __cnfn convert_uchar16(double16);
-uchar16 __ovld __cnfn convert_uchar16_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16);
-
-short __ovld __cnfn convert_short(double);
-short __ovld __cnfn convert_short_rte(double);
-short __ovld __cnfn convert_short_rtn(double);
-short __ovld __cnfn convert_short_rtp(double);
-short __ovld __cnfn convert_short_rtz(double);
-short __ovld __cnfn convert_short_sat(double);
-short __ovld __cnfn convert_short_sat_rte(double);
-short __ovld __cnfn convert_short_sat_rtn(double);
-short __ovld __cnfn convert_short_sat_rtp(double);
-short __ovld __cnfn convert_short_sat_rtz(double);
-short2 __ovld __cnfn convert_short2(double2);
-short2 __ovld __cnfn convert_short2_rte(double2);
-short2 __ovld __cnfn convert_short2_rtn(double2);
-short2 __ovld __cnfn convert_short2_rtp(double2);
-short2 __ovld __cnfn convert_short2_rtz(double2);
-short2 __ovld __cnfn convert_short2_sat(double2);
-short2 __ovld __cnfn convert_short2_sat_rte(double2);
-short2 __ovld __cnfn convert_short2_sat_rtn(double2);
-short2 __ovld __cnfn convert_short2_sat_rtp(double2);
-short2 __ovld __cnfn convert_short2_sat_rtz(double2);
-short3 __ovld __cnfn convert_short3(double3);
-short3 __ovld __cnfn convert_short3_rte(double3);
-short3 __ovld __cnfn convert_short3_rtn(double3);
-short3 __ovld __cnfn convert_short3_rtp(double3);
-short3 __ovld __cnfn convert_short3_rtz(double3);
-short3 __ovld __cnfn convert_short3_sat(double3);
-short3 __ovld __cnfn convert_short3_sat_rte(double3);
-short3 __ovld __cnfn convert_short3_sat_rtn(double3);
-short3 __ovld __cnfn convert_short3_sat_rtp(double3);
-short3 __ovld __cnfn convert_short3_sat_rtz(double3);
-short4 __ovld __cnfn convert_short4(double4);
-short4 __ovld __cnfn convert_short4_rte(double4);
-short4 __ovld __cnfn convert_short4_rtn(double4);
-short4 __ovld __cnfn convert_short4_rtp(double4);
-short4 __ovld __cnfn convert_short4_rtz(double4);
-short4 __ovld __cnfn convert_short4_sat(double4);
-short4 __ovld __cnfn convert_short4_sat_rte(double4);
-short4 __ovld __cnfn convert_short4_sat_rtn(double4);
-short4 __ovld __cnfn convert_short4_sat_rtp(double4);
-short4 __ovld __cnfn convert_short4_sat_rtz(double4);
-short8 __ovld __cnfn convert_short8(double8);
-short8 __ovld __cnfn convert_short8_rte(double8);
-short8 __ovld __cnfn convert_short8_rtn(double8);
-short8 __ovld __cnfn convert_short8_rtp(double8);
-short8 __ovld __cnfn convert_short8_rtz(double8);
-short8 __ovld __cnfn convert_short8_sat(double8);
-short8 __ovld __cnfn convert_short8_sat_rte(double8);
-short8 __ovld __cnfn convert_short8_sat_rtn(double8);
-short8 __ovld __cnfn convert_short8_sat_rtp(double8);
-short8 __ovld __cnfn convert_short8_sat_rtz(double8);
-short16 __ovld __cnfn convert_short16(double16);
-short16 __ovld __cnfn convert_short16_rte(double16);
-short16 __ovld __cnfn convert_short16_rtn(double16);
-short16 __ovld __cnfn convert_short16_rtp(double16);
-short16 __ovld __cnfn convert_short16_rtz(double16);
-short16 __ovld __cnfn convert_short16_sat(double16);
-short16 __ovld __cnfn convert_short16_sat_rte(double16);
-short16 __ovld __cnfn convert_short16_sat_rtn(double16);
-short16 __ovld __cnfn convert_short16_sat_rtp(double16);
-short16 __ovld __cnfn convert_short16_sat_rtz(double16);
-
-ushort __ovld __cnfn convert_ushort(double);
-ushort __ovld __cnfn convert_ushort_rte(double);
-ushort __ovld __cnfn convert_ushort_rtn(double);
-ushort __ovld __cnfn convert_ushort_rtp(double);
-ushort __ovld __cnfn convert_ushort_rtz(double);
-ushort __ovld __cnfn convert_ushort_sat(double);
-ushort __ovld __cnfn convert_ushort_sat_rte(double);
-ushort __ovld __cnfn convert_ushort_sat_rtn(double);
-ushort __ovld __cnfn convert_ushort_sat_rtp(double);
-ushort __ovld __cnfn convert_ushort_sat_rtz(double);
-ushort2 __ovld __cnfn convert_ushort2(double2);
-ushort2 __ovld __cnfn convert_ushort2_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2);
-ushort3 __ovld __cnfn convert_ushort3(double3);
-ushort3 __ovld __cnfn convert_ushort3_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3);
-ushort4 __ovld __cnfn convert_ushort4(double4);
-ushort4 __ovld __cnfn convert_ushort4_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4);
-ushort8 __ovld __cnfn convert_ushort8(double8);
-ushort8 __ovld __cnfn convert_ushort8_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8);
-ushort16 __ovld __cnfn convert_ushort16(double16);
-ushort16 __ovld __cnfn convert_ushort16_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16);
-
-int __ovld __cnfn convert_int(double);
-int __ovld __cnfn convert_int_rte(double);
-int __ovld __cnfn convert_int_rtn(double);
-int __ovld __cnfn convert_int_rtp(double);
-int __ovld __cnfn convert_int_rtz(double);
-int __ovld __cnfn convert_int_sat(double);
-int __ovld __cnfn convert_int_sat_rte(double);
-int __ovld __cnfn convert_int_sat_rtn(double);
-int __ovld __cnfn convert_int_sat_rtp(double);
-int __ovld __cnfn convert_int_sat_rtz(double);
-int2 __ovld __cnfn convert_int2(double2);
-int2 __ovld __cnfn convert_int2_rte(double2);
-int2 __ovld __cnfn convert_int2_rtn(double2);
-int2 __ovld __cnfn convert_int2_rtp(double2);
-int2 __ovld __cnfn convert_int2_rtz(double2);
-int2 __ovld __cnfn convert_int2_sat(double2);
-int2 __ovld __cnfn convert_int2_sat_rte(double2);
-int2 __ovld __cnfn convert_int2_sat_rtn(double2);
-int2 __ovld __cnfn convert_int2_sat_rtp(double2);
-int2 __ovld __cnfn convert_int2_sat_rtz(double2);
-int3 __ovld __cnfn convert_int3(double3);
-int3 __ovld __cnfn convert_int3_rte(double3);
-int3 __ovld __cnfn convert_int3_rtn(double3);
-int3 __ovld __cnfn convert_int3_rtp(double3);
-int3 __ovld __cnfn convert_int3_rtz(double3);
-int3 __ovld __cnfn convert_int3_sat(double3);
-int3 __ovld __cnfn convert_int3_sat_rte(double3);
-int3 __ovld __cnfn convert_int3_sat_rtn(double3);
-int3 __ovld __cnfn convert_int3_sat_rtp(double3);
-int3 __ovld __cnfn convert_int3_sat_rtz(double3);
-int4 __ovld __cnfn convert_int4(double4);
-int4 __ovld __cnfn convert_int4_rte(double4);
-int4 __ovld __cnfn convert_int4_rtn(double4);
-int4 __ovld __cnfn convert_int4_rtp(double4);
-int4 __ovld __cnfn convert_int4_rtz(double4);
-int4 __ovld __cnfn convert_int4_sat(double4);
-int4 __ovld __cnfn convert_int4_sat_rte(double4);
-int4 __ovld __cnfn convert_int4_sat_rtn(double4);
-int4 __ovld __cnfn convert_int4_sat_rtp(double4);
-int4 __ovld __cnfn convert_int4_sat_rtz(double4);
-int8 __ovld __cnfn convert_int8(double8);
-int8 __ovld __cnfn convert_int8_rte(double8);
-int8 __ovld __cnfn convert_int8_rtn(double8);
-int8 __ovld __cnfn convert_int8_rtp(double8);
-int8 __ovld __cnfn convert_int8_rtz(double8);
-int8 __ovld __cnfn convert_int8_sat(double8);
-int8 __ovld __cnfn convert_int8_sat_rte(double8);
-int8 __ovld __cnfn convert_int8_sat_rtn(double8);
-int8 __ovld __cnfn convert_int8_sat_rtp(double8);
-int8 __ovld __cnfn convert_int8_sat_rtz(double8);
-int16 __ovld __cnfn convert_int16(double16);
-int16 __ovld __cnfn convert_int16_rte(double16);
-int16 __ovld __cnfn convert_int16_rtn(double16);
-int16 __ovld __cnfn convert_int16_rtp(double16);
-int16 __ovld __cnfn convert_int16_rtz(double16);
-int16 __ovld __cnfn convert_int16_sat(double16);
-int16 __ovld __cnfn convert_int16_sat_rte(double16);
-int16 __ovld __cnfn convert_int16_sat_rtn(double16);
-int16 __ovld __cnfn convert_int16_sat_rtp(double16);
-int16 __ovld __cnfn convert_int16_sat_rtz(double16);
-
-uint __ovld __cnfn convert_uint(double);
-uint __ovld __cnfn convert_uint_rte(double);
-uint __ovld __cnfn convert_uint_rtn(double);
-uint __ovld __cnfn convert_uint_rtp(double);
-uint __ovld __cnfn convert_uint_rtz(double);
-uint __ovld __cnfn convert_uint_sat(double);
-uint __ovld __cnfn convert_uint_sat_rte(double);
-uint __ovld __cnfn convert_uint_sat_rtn(double);
-uint __ovld __cnfn convert_uint_sat_rtp(double);
-uint __ovld __cnfn convert_uint_sat_rtz(double);
-uint2 __ovld __cnfn convert_uint2(double2);
-uint2 __ovld __cnfn convert_uint2_rte(double2);
-uint2 __ovld __cnfn convert_uint2_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_rtz(double2);
-uint2 __ovld __cnfn convert_uint2_sat(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(double2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(double2);
-uint3 __ovld __cnfn convert_uint3(double3);
-uint3 __ovld __cnfn convert_uint3_rte(double3);
-uint3 __ovld __cnfn convert_uint3_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_rtz(double3);
-uint3 __ovld __cnfn convert_uint3_sat(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(double3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(double3);
-uint4 __ovld __cnfn convert_uint4(double4);
-uint4 __ovld __cnfn convert_uint4_rte(double4);
-uint4 __ovld __cnfn convert_uint4_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_rtz(double4);
-uint4 __ovld __cnfn convert_uint4_sat(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(double4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(double4);
-uint8 __ovld __cnfn convert_uint8(double8);
-uint8 __ovld __cnfn convert_uint8_rte(double8);
-uint8 __ovld __cnfn convert_uint8_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_rtz(double8);
-uint8 __ovld __cnfn convert_uint8_sat(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(double8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(double8);
-uint16 __ovld __cnfn convert_uint16(double16);
-uint16 __ovld __cnfn convert_uint16_rte(double16);
-uint16 __ovld __cnfn convert_uint16_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_rtz(double16);
-uint16 __ovld __cnfn convert_uint16_sat(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(double16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(double16);
-
-long __ovld __cnfn convert_long(double);
-long __ovld __cnfn convert_long_rte(double);
-long __ovld __cnfn convert_long_rtn(double);
-long __ovld __cnfn convert_long_rtp(double);
-long __ovld __cnfn convert_long_rtz(double);
-long __ovld __cnfn convert_long_sat(double);
-long __ovld __cnfn convert_long_sat_rte(double);
-long __ovld __cnfn convert_long_sat_rtn(double);
-long __ovld __cnfn convert_long_sat_rtp(double);
-long __ovld __cnfn convert_long_sat_rtz(double);
-long2 __ovld __cnfn convert_long2(double2);
-long2 __ovld __cnfn convert_long2_rte(double2);
-long2 __ovld __cnfn convert_long2_rtn(double2);
-long2 __ovld __cnfn convert_long2_rtp(double2);
-long2 __ovld __cnfn convert_long2_rtz(double2);
-long2 __ovld __cnfn convert_long2_sat(double2);
-long2 __ovld __cnfn convert_long2_sat_rte(double2);
-long2 __ovld __cnfn convert_long2_sat_rtn(double2);
-long2 __ovld __cnfn convert_long2_sat_rtp(double2);
-long2 __ovld __cnfn convert_long2_sat_rtz(double2);
-long3 __ovld __cnfn convert_long3(double3);
-long3 __ovld __cnfn convert_long3_rte(double3);
-long3 __ovld __cnfn convert_long3_rtn(double3);
-long3 __ovld __cnfn convert_long3_rtp(double3);
-long3 __ovld __cnfn convert_long3_rtz(double3);
-long3 __ovld __cnfn convert_long3_sat(double3);
-long3 __ovld __cnfn convert_long3_sat_rte(double3);
-long3 __ovld __cnfn convert_long3_sat_rtn(double3);
-long3 __ovld __cnfn convert_long3_sat_rtp(double3);
-long3 __ovld __cnfn convert_long3_sat_rtz(double3);
-long4 __ovld __cnfn convert_long4(double4);
-long4 __ovld __cnfn convert_long4_rte(double4);
-long4 __ovld __cnfn convert_long4_rtn(double4);
-long4 __ovld __cnfn convert_long4_rtp(double4);
-long4 __ovld __cnfn convert_long4_rtz(double4);
-long4 __ovld __cnfn convert_long4_sat(double4);
-long4 __ovld __cnfn convert_long4_sat_rte(double4);
-long4 __ovld __cnfn convert_long4_sat_rtn(double4);
-long4 __ovld __cnfn convert_long4_sat_rtp(double4);
-long4 __ovld __cnfn convert_long4_sat_rtz(double4);
-long8 __ovld __cnfn convert_long8(double8);
-long8 __ovld __cnfn convert_long8_rte(double8);
-long8 __ovld __cnfn convert_long8_rtn(double8);
-long8 __ovld __cnfn convert_long8_rtp(double8);
-long8 __ovld __cnfn convert_long8_rtz(double8);
-long8 __ovld __cnfn convert_long8_sat(double8);
-long8 __ovld __cnfn convert_long8_sat_rte(double8);
-long8 __ovld __cnfn convert_long8_sat_rtn(double8);
-long8 __ovld __cnfn convert_long8_sat_rtp(double8);
-long8 __ovld __cnfn convert_long8_sat_rtz(double8);
-long16 __ovld __cnfn convert_long16(double16);
-long16 __ovld __cnfn convert_long16_rte(double16);
-long16 __ovld __cnfn convert_long16_rtn(double16);
-long16 __ovld __cnfn convert_long16_rtp(double16);
-long16 __ovld __cnfn convert_long16_rtz(double16);
-long16 __ovld __cnfn convert_long16_sat(double16);
-long16 __ovld __cnfn convert_long16_sat_rte(double16);
-long16 __ovld __cnfn convert_long16_sat_rtn(double16);
-long16 __ovld __cnfn convert_long16_sat_rtp(double16);
-long16 __ovld __cnfn convert_long16_sat_rtz(double16);
-
-ulong __ovld __cnfn convert_ulong(double);
-ulong __ovld __cnfn convert_ulong_rte(double);
-ulong __ovld __cnfn convert_ulong_rtn(double);
-ulong __ovld __cnfn convert_ulong_rtp(double);
-ulong __ovld __cnfn convert_ulong_rtz(double);
-ulong __ovld __cnfn convert_ulong_sat(double);
-ulong __ovld __cnfn convert_ulong_sat_rte(double);
-ulong __ovld __cnfn convert_ulong_sat_rtn(double);
-ulong __ovld __cnfn convert_ulong_sat_rtp(double);
-ulong __ovld __cnfn convert_ulong_sat_rtz(double);
-ulong2 __ovld __cnfn convert_ulong2(double2);
-ulong2 __ovld __cnfn convert_ulong2_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2);
-ulong3 __ovld __cnfn convert_ulong3(double3);
-ulong3 __ovld __cnfn convert_ulong3_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3);
-ulong4 __ovld __cnfn convert_ulong4(double4);
-ulong4 __ovld __cnfn convert_ulong4_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4);
-ulong8 __ovld __cnfn convert_ulong8(double8);
-ulong8 __ovld __cnfn convert_ulong8_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8);
-ulong16 __ovld __cnfn convert_ulong16(double16);
-ulong16 __ovld __cnfn convert_ulong16_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16);
-
-float __ovld __cnfn convert_float(double);
-float __ovld __cnfn convert_float_rte(double);
-float __ovld __cnfn convert_float_rtn(double);
-float __ovld __cnfn convert_float_rtp(double);
-float __ovld __cnfn convert_float_rtz(double);
-float2 __ovld __cnfn convert_float2(double2);
-float2 __ovld __cnfn convert_float2_rte(double2);
-float2 __ovld __cnfn convert_float2_rtn(double2);
-float2 __ovld __cnfn convert_float2_rtp(double2);
-float2 __ovld __cnfn convert_float2_rtz(double2);
-float3 __ovld __cnfn convert_float3(double3);
-float3 __ovld __cnfn convert_float3_rte(double3);
-float3 __ovld __cnfn convert_float3_rtn(double3);
-float3 __ovld __cnfn convert_float3_rtp(double3);
-float3 __ovld __cnfn convert_float3_rtz(double3);
-float4 __ovld __cnfn convert_float4(double4);
-float4 __ovld __cnfn convert_float4_rte(double4);
-float4 __ovld __cnfn convert_float4_rtn(double4);
-float4 __ovld __cnfn convert_float4_rtp(double4);
-float4 __ovld __cnfn convert_float4_rtz(double4);
-float8 __ovld __cnfn convert_float8(double8);
-float8 __ovld __cnfn convert_float8_rte(double8);
-float8 __ovld __cnfn convert_float8_rtn(double8);
-float8 __ovld __cnfn convert_float8_rtp(double8);
-float8 __ovld __cnfn convert_float8_rtz(double8);
-float16 __ovld __cnfn convert_float16(double16);
-float16 __ovld __cnfn convert_float16_rte(double16);
-float16 __ovld __cnfn convert_float16_rtn(double16);
-float16 __ovld __cnfn convert_float16_rtp(double16);
-float16 __ovld __cnfn convert_float16_rtz(double16);
-
-double __ovld __cnfn convert_double(char);
-double __ovld __cnfn convert_double(double);
-double __ovld __cnfn convert_double(float);
-double __ovld __cnfn convert_double(int);
-double __ovld __cnfn convert_double(long);
-double __ovld __cnfn convert_double(short);
-double __ovld __cnfn convert_double(uchar);
-double __ovld __cnfn convert_double(uint);
-double __ovld __cnfn convert_double(ulong);
-double __ovld __cnfn convert_double(ushort);
-double __ovld __cnfn convert_double_rte(char);
-double __ovld __cnfn convert_double_rte(double);
-double __ovld __cnfn convert_double_rte(float);
-double __ovld __cnfn convert_double_rte(int);
-double __ovld __cnfn convert_double_rte(long);
-double __ovld __cnfn convert_double_rte(short);
-double __ovld __cnfn convert_double_rte(uchar);
-double __ovld __cnfn convert_double_rte(uint);
-double __ovld __cnfn convert_double_rte(ulong);
-double __ovld __cnfn convert_double_rte(ushort);
-double __ovld __cnfn convert_double_rtn(char);
-double __ovld __cnfn convert_double_rtn(double);
-double __ovld __cnfn convert_double_rtn(float);
-double __ovld __cnfn convert_double_rtn(int);
-double __ovld __cnfn convert_double_rtn(long);
-double __ovld __cnfn convert_double_rtn(short);
-double __ovld __cnfn convert_double_rtn(uchar);
-double __ovld __cnfn convert_double_rtn(uint);
-double __ovld __cnfn convert_double_rtn(ulong);
-double __ovld __cnfn convert_double_rtn(ushort);
-double __ovld __cnfn convert_double_rtp(char);
-double __ovld __cnfn convert_double_rtp(double);
-double __ovld __cnfn convert_double_rtp(float);
-double __ovld __cnfn convert_double_rtp(int);
-double __ovld __cnfn convert_double_rtp(long);
-double __ovld __cnfn convert_double_rtp(short);
-double __ovld __cnfn convert_double_rtp(uchar);
-double __ovld __cnfn convert_double_rtp(uint);
-double __ovld __cnfn convert_double_rtp(ulong);
-double __ovld __cnfn convert_double_rtp(ushort);
-double __ovld __cnfn convert_double_rtz(char);
-double __ovld __cnfn convert_double_rtz(double);
-double __ovld __cnfn convert_double_rtz(float);
-double __ovld __cnfn convert_double_rtz(int);
-double __ovld __cnfn convert_double_rtz(long);
-double __ovld __cnfn convert_double_rtz(short);
-double __ovld __cnfn convert_double_rtz(uchar);
-double __ovld __cnfn convert_double_rtz(uint);
-double __ovld __cnfn convert_double_rtz(ulong);
-double __ovld __cnfn convert_double_rtz(ushort);
-double2 __ovld __cnfn convert_double2(char2);
-double2 __ovld __cnfn convert_double2(double2);
-double2 __ovld __cnfn convert_double2(float2);
-double2 __ovld __cnfn convert_double2(int2);
-double2 __ovld __cnfn convert_double2(long2);
-double2 __ovld __cnfn convert_double2(short2);
-double2 __ovld __cnfn convert_double2(uchar2);
-double2 __ovld __cnfn convert_double2(uint2);
-double2 __ovld __cnfn convert_double2(ulong2);
-double2 __ovld __cnfn convert_double2(ushort2);
-double2 __ovld __cnfn convert_double2_rte(char2);
-double2 __ovld __cnfn convert_double2_rte(double2);
-double2 __ovld __cnfn convert_double2_rte(float2);
-double2 __ovld __cnfn convert_double2_rte(int2);
-double2 __ovld __cnfn convert_double2_rte(long2);
-double2 __ovld __cnfn convert_double2_rte(short2);
-double2 __ovld __cnfn convert_double2_rte(uchar2);
-double2 __ovld __cnfn convert_double2_rte(uint2);
-double2 __ovld __cnfn convert_double2_rte(ulong2);
-double2 __ovld __cnfn convert_double2_rte(ushort2);
-double2 __ovld __cnfn convert_double2_rtn(char2);
-double2 __ovld __cnfn convert_double2_rtn(double2);
-double2 __ovld __cnfn convert_double2_rtn(float2);
-double2 __ovld __cnfn convert_double2_rtn(int2);
-double2 __ovld __cnfn convert_double2_rtn(long2);
-double2 __ovld __cnfn convert_double2_rtn(short2);
-double2 __ovld __cnfn convert_double2_rtn(uchar2);
-double2 __ovld __cnfn convert_double2_rtn(uint2);
-double2 __ovld __cnfn convert_double2_rtn(ulong2);
-double2 __ovld __cnfn convert_double2_rtn(ushort2);
-double2 __ovld __cnfn convert_double2_rtp(char2);
-double2 __ovld __cnfn convert_double2_rtp(double2);
-double2 __ovld __cnfn convert_double2_rtp(float2);
-double2 __ovld __cnfn convert_double2_rtp(int2);
-double2 __ovld __cnfn convert_double2_rtp(long2);
-double2 __ovld __cnfn convert_double2_rtp(short2);
-double2 __ovld __cnfn convert_double2_rtp(uchar2);
-double2 __ovld __cnfn convert_double2_rtp(uint2);
-double2 __ovld __cnfn convert_double2_rtp(ulong2);
-double2 __ovld __cnfn convert_double2_rtp(ushort2);
-double2 __ovld __cnfn convert_double2_rtz(char2);
-double2 __ovld __cnfn convert_double2_rtz(double2);
-double2 __ovld __cnfn convert_double2_rtz(float2);
-double2 __ovld __cnfn convert_double2_rtz(int2);
-double2 __ovld __cnfn convert_double2_rtz(long2);
-double2 __ovld __cnfn convert_double2_rtz(short2);
-double2 __ovld __cnfn convert_double2_rtz(uchar2);
-double2 __ovld __cnfn convert_double2_rtz(uint2);
-double2 __ovld __cnfn convert_double2_rtz(ulong2);
-double2 __ovld __cnfn convert_double2_rtz(ushort2);
-double3 __ovld __cnfn convert_double3(char3);
-double3 __ovld __cnfn convert_double3(double3);
-double3 __ovld __cnfn convert_double3(float3);
-double3 __ovld __cnfn convert_double3(int3);
-double3 __ovld __cnfn convert_double3(long3);
-double3 __ovld __cnfn convert_double3(short3);
-double3 __ovld __cnfn convert_double3(uchar3);
-double3 __ovld __cnfn convert_double3(uint3);
-double3 __ovld __cnfn convert_double3(ulong3);
-double3 __ovld __cnfn convert_double3(ushort3);
-double3 __ovld __cnfn convert_double3_rte(char3);
-double3 __ovld __cnfn convert_double3_rte(double3);
-double3 __ovld __cnfn convert_double3_rte(float3);
-double3 __ovld __cnfn convert_double3_rte(int3);
-double3 __ovld __cnfn convert_double3_rte(long3);
-double3 __ovld __cnfn convert_double3_rte(short3);
-double3 __ovld __cnfn convert_double3_rte(uchar3);
-double3 __ovld __cnfn convert_double3_rte(uint3);
-double3 __ovld __cnfn convert_double3_rte(ulong3);
-double3 __ovld __cnfn convert_double3_rte(ushort3);
-double3 __ovld __cnfn convert_double3_rtn(char3);
-double3 __ovld __cnfn convert_double3_rtn(double3);
-double3 __ovld __cnfn convert_double3_rtn(float3);
-double3 __ovld __cnfn convert_double3_rtn(int3);
-double3 __ovld __cnfn convert_double3_rtn(long3);
-double3 __ovld __cnfn convert_double3_rtn(short3);
-double3 __ovld __cnfn convert_double3_rtn(uchar3);
-double3 __ovld __cnfn convert_double3_rtn(uint3);
-double3 __ovld __cnfn convert_double3_rtn(ulong3);
-double3 __ovld __cnfn convert_double3_rtn(ushort3);
-double3 __ovld __cnfn convert_double3_rtp(char3);
-double3 __ovld __cnfn convert_double3_rtp(double3);
-double3 __ovld __cnfn convert_double3_rtp(float3);
-double3 __ovld __cnfn convert_double3_rtp(int3);
-double3 __ovld __cnfn convert_double3_rtp(long3);
-double3 __ovld __cnfn convert_double3_rtp(short3);
-double3 __ovld __cnfn convert_double3_rtp(uchar3);
-double3 __ovld __cnfn convert_double3_rtp(uint3);
-double3 __ovld __cnfn convert_double3_rtp(ulong3);
-double3 __ovld __cnfn convert_double3_rtp(ushort3);
-double3 __ovld __cnfn convert_double3_rtz(char3);
-double3 __ovld __cnfn convert_double3_rtz(double3);
-double3 __ovld __cnfn convert_double3_rtz(float3);
-double3 __ovld __cnfn convert_double3_rtz(int3);
-double3 __ovld __cnfn convert_double3_rtz(long3);
-double3 __ovld __cnfn convert_double3_rtz(short3);
-double3 __ovld __cnfn convert_double3_rtz(uchar3);
-double3 __ovld __cnfn convert_double3_rtz(uint3);
-double3 __ovld __cnfn convert_double3_rtz(ulong3);
-double3 __ovld __cnfn convert_double3_rtz(ushort3);
-double4 __ovld __cnfn convert_double4(char4);
-double4 __ovld __cnfn convert_double4(double4);
-double4 __ovld __cnfn convert_double4(float4);
-double4 __ovld __cnfn convert_double4(int4);
-double4 __ovld __cnfn convert_double4(long4);
-double4 __ovld __cnfn convert_double4(short4);
-double4 __ovld __cnfn convert_double4(uchar4);
-double4 __ovld __cnfn convert_double4(uint4);
-double4 __ovld __cnfn convert_double4(ulong4);
-double4 __ovld __cnfn convert_double4(ushort4);
-double4 __ovld __cnfn convert_double4_rte(char4);
-double4 __ovld __cnfn convert_double4_rte(double4);
-double4 __ovld __cnfn convert_double4_rte(float4);
-double4 __ovld __cnfn convert_double4_rte(int4);
-double4 __ovld __cnfn convert_double4_rte(long4);
-double4 __ovld __cnfn convert_double4_rte(short4);
-double4 __ovld __cnfn convert_double4_rte(uchar4);
-double4 __ovld __cnfn convert_double4_rte(uint4);
-double4 __ovld __cnfn convert_double4_rte(ulong4);
-double4 __ovld __cnfn convert_double4_rte(ushort4);
-double4 __ovld __cnfn convert_double4_rtn(char4);
-double4 __ovld __cnfn convert_double4_rtn(double4);
-double4 __ovld __cnfn convert_double4_rtn(float4);
-double4 __ovld __cnfn convert_double4_rtn(int4);
-double4 __ovld __cnfn convert_double4_rtn(long4);
-double4 __ovld __cnfn convert_double4_rtn(short4);
-double4 __ovld __cnfn convert_double4_rtn(uchar4);
-double4 __ovld __cnfn convert_double4_rtn(uint4);
-double4 __ovld __cnfn convert_double4_rtn(ulong4);
-double4 __ovld __cnfn convert_double4_rtn(ushort4);
-double4 __ovld __cnfn convert_double4_rtp(char4);
-double4 __ovld __cnfn convert_double4_rtp(double4);
-double4 __ovld __cnfn convert_double4_rtp(float4);
-double4 __ovld __cnfn convert_double4_rtp(int4);
-double4 __ovld __cnfn convert_double4_rtp(long4);
-double4 __ovld __cnfn convert_double4_rtp(short4);
-double4 __ovld __cnfn convert_double4_rtp(uchar4);
-double4 __ovld __cnfn convert_double4_rtp(uint4);
-double4 __ovld __cnfn convert_double4_rtp(ulong4);
-double4 __ovld __cnfn convert_double4_rtp(ushort4);
-double4 __ovld __cnfn convert_double4_rtz(char4);
-double4 __ovld __cnfn convert_double4_rtz(double4);
-double4 __ovld __cnfn convert_double4_rtz(float4);
-double4 __ovld __cnfn convert_double4_rtz(int4);
-double4 __ovld __cnfn convert_double4_rtz(long4);
-double4 __ovld __cnfn convert_double4_rtz(short4);
-double4 __ovld __cnfn convert_double4_rtz(uchar4);
-double4 __ovld __cnfn convert_double4_rtz(uint4);
-double4 __ovld __cnfn convert_double4_rtz(ulong4);
-double4 __ovld __cnfn convert_double4_rtz(ushort4);
-double8 __ovld __cnfn convert_double8(char8);
-double8 __ovld __cnfn convert_double8(double8);
-double8 __ovld __cnfn convert_double8(float8);
-double8 __ovld __cnfn convert_double8(int8);
-double8 __ovld __cnfn convert_double8(long8);
-double8 __ovld __cnfn convert_double8(short8);
-double8 __ovld __cnfn convert_double8(uchar8);
-double8 __ovld __cnfn convert_double8(uint8);
-double8 __ovld __cnfn convert_double8(ulong8);
-double8 __ovld __cnfn convert_double8(ushort8);
-double8 __ovld __cnfn convert_double8_rte(char8);
-double8 __ovld __cnfn convert_double8_rte(double8);
-double8 __ovld __cnfn convert_double8_rte(float8);
-double8 __ovld __cnfn convert_double8_rte(int8);
-double8 __ovld __cnfn convert_double8_rte(long8);
-double8 __ovld __cnfn convert_double8_rte(short8);
-double8 __ovld __cnfn convert_double8_rte(uchar8);
-double8 __ovld __cnfn convert_double8_rte(uint8);
-double8 __ovld __cnfn convert_double8_rte(ulong8);
-double8 __ovld __cnfn convert_double8_rte(ushort8);
-double8 __ovld __cnfn convert_double8_rtn(char8);
-double8 __ovld __cnfn convert_double8_rtn(double8);
-double8 __ovld __cnfn convert_double8_rtn(float8);
-double8 __ovld __cnfn convert_double8_rtn(int8);
-double8 __ovld __cnfn convert_double8_rtn(long8);
-double8 __ovld __cnfn convert_double8_rtn(short8);
-double8 __ovld __cnfn convert_double8_rtn(uchar8);
-double8 __ovld __cnfn convert_double8_rtn(uint8);
-double8 __ovld __cnfn convert_double8_rtn(ulong8);
-double8 __ovld __cnfn convert_double8_rtn(ushort8);
-double8 __ovld __cnfn convert_double8_rtp(char8);
-double8 __ovld __cnfn convert_double8_rtp(double8);
-double8 __ovld __cnfn convert_double8_rtp(float8);
-double8 __ovld __cnfn convert_double8_rtp(int8);
-double8 __ovld __cnfn convert_double8_rtp(long8);
-double8 __ovld __cnfn convert_double8_rtp(short8);
-double8 __ovld __cnfn convert_double8_rtp(uchar8);
-double8 __ovld __cnfn convert_double8_rtp(uint8);
-double8 __ovld __cnfn convert_double8_rtp(ulong8);
-double8 __ovld __cnfn convert_double8_rtp(ushort8);
-double8 __ovld __cnfn convert_double8_rtz(char8);
-double8 __ovld __cnfn convert_double8_rtz(double8);
-double8 __ovld __cnfn convert_double8_rtz(float8);
-double8 __ovld __cnfn convert_double8_rtz(int8);
-double8 __ovld __cnfn convert_double8_rtz(long8);
-double8 __ovld __cnfn convert_double8_rtz(short8);
-double8 __ovld __cnfn convert_double8_rtz(uchar8);
-double8 __ovld __cnfn convert_double8_rtz(uint8);
-double8 __ovld __cnfn convert_double8_rtz(ulong8);
-double8 __ovld __cnfn convert_double8_rtz(ushort8);
-double16 __ovld __cnfn convert_double16(char16);
-double16 __ovld __cnfn convert_double16(double16);
-double16 __ovld __cnfn convert_double16(float16);
-double16 __ovld __cnfn convert_double16(int16);
-double16 __ovld __cnfn convert_double16(long16);
-double16 __ovld __cnfn convert_double16(short16);
-double16 __ovld __cnfn convert_double16(uchar16);
-double16 __ovld __cnfn convert_double16(uint16);
-double16 __ovld __cnfn convert_double16(ulong16);
-double16 __ovld __cnfn convert_double16(ushort16);
-double16 __ovld __cnfn convert_double16_rte(char16);
-double16 __ovld __cnfn convert_double16_rte(double16);
-double16 __ovld __cnfn convert_double16_rte(float16);
-double16 __ovld __cnfn convert_double16_rte(int16);
-double16 __ovld __cnfn convert_double16_rte(long16);
-double16 __ovld __cnfn convert_double16_rte(short16);
-double16 __ovld __cnfn convert_double16_rte(uchar16);
-double16 __ovld __cnfn convert_double16_rte(uint16);
-double16 __ovld __cnfn convert_double16_rte(ulong16);
-double16 __ovld __cnfn convert_double16_rte(ushort16);
-double16 __ovld __cnfn convert_double16_rtn(char16);
-double16 __ovld __cnfn convert_double16_rtn(double16);
-double16 __ovld __cnfn convert_double16_rtn(float16);
-double16 __ovld __cnfn convert_double16_rtn(int16);
-double16 __ovld __cnfn convert_double16_rtn(long16);
-double16 __ovld __cnfn convert_double16_rtn(short16);
-double16 __ovld __cnfn convert_double16_rtn(uchar16);
-double16 __ovld __cnfn convert_double16_rtn(uint16);
-double16 __ovld __cnfn convert_double16_rtn(ulong16);
-double16 __ovld __cnfn convert_double16_rtn(ushort16);
-double16 __ovld __cnfn convert_double16_rtp(char16);
-double16 __ovld __cnfn convert_double16_rtp(double16);
-double16 __ovld __cnfn convert_double16_rtp(float16);
-double16 __ovld __cnfn convert_double16_rtp(int16);
-double16 __ovld __cnfn convert_double16_rtp(long16);
-double16 __ovld __cnfn convert_double16_rtp(short16);
-double16 __ovld __cnfn convert_double16_rtp(uchar16);
-double16 __ovld __cnfn convert_double16_rtp(uint16);
-double16 __ovld __cnfn convert_double16_rtp(ulong16);
-double16 __ovld __cnfn convert_double16_rtp(ushort16);
-double16 __ovld __cnfn convert_double16_rtz(char16);
-double16 __ovld __cnfn convert_double16_rtz(double16);
-double16 __ovld __cnfn convert_double16_rtz(float16);
-double16 __ovld __cnfn convert_double16_rtz(int16);
-double16 __ovld __cnfn convert_double16_rtz(long16);
-double16 __ovld __cnfn convert_double16_rtz(short16);
-double16 __ovld __cnfn convert_double16_rtz(uchar16);
-double16 __ovld __cnfn convert_double16_rtz(uint16);
-double16 __ovld __cnfn convert_double16_rtz(ulong16);
-double16 __ovld __cnfn convert_double16_rtz(ushort16);
-#endif //cl_khr_fp64
-
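The double-typed overloads above are declared only when the cl_khr_fp64 extension is available. A hedged sketch of how a portable kernel would guard their use (the kernel and buffer names are invented for illustration):

#ifdef cl_khr_fp64
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
__kernel void widen_ints(__global const int4 *in, __global double4 *out) {
  size_t i = get_global_id(0);
  // Every int is exactly representable as a double, so the plain
  // convert_double4 overload (no _sat or rounding suffix) suffices.
  out[i] = convert_double4(in[i]);
}
#endif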
-#ifdef cl_khr_fp16
-// Convert half types to non-double types.
-uchar __ovld __cnfn convert_uchar(half);
-uchar __ovld __cnfn convert_uchar_rte(half);
-uchar __ovld __cnfn convert_uchar_rtp(half);
-uchar __ovld __cnfn convert_uchar_rtn(half);
-uchar __ovld __cnfn convert_uchar_rtz(half);
-uchar __ovld __cnfn convert_uchar_sat(half);
-uchar __ovld __cnfn convert_uchar_sat_rte(half);
-uchar __ovld __cnfn convert_uchar_sat_rtp(half);
-uchar __ovld __cnfn convert_uchar_sat_rtn(half);
-uchar __ovld __cnfn convert_uchar_sat_rtz(half);
-uchar2 __ovld __cnfn convert_uchar2(half2);
-uchar2 __ovld __cnfn convert_uchar2_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_rtz(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2);
-uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2);
-uchar3 __ovld __cnfn convert_uchar3(half3);
-uchar3 __ovld __cnfn convert_uchar3_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_rtz(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3);
-uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3);
-uchar4 __ovld __cnfn convert_uchar4(half4);
-uchar4 __ovld __cnfn convert_uchar4_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_rtz(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4);
-uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4);
-uchar8 __ovld __cnfn convert_uchar8(half8);
-uchar8 __ovld __cnfn convert_uchar8_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_rtz(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8);
-uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8);
-uchar16 __ovld __cnfn convert_uchar16(half16);
-uchar16 __ovld __cnfn convert_uchar16_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_rtz(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rte(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16);
-uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16);
-ushort __ovld __cnfn convert_ushort(half);
-ushort __ovld __cnfn convert_ushort_rte(half);
-ushort __ovld __cnfn convert_ushort_rtp(half);
-ushort __ovld __cnfn convert_ushort_rtn(half);
-ushort __ovld __cnfn convert_ushort_rtz(half);
-ushort __ovld __cnfn convert_ushort_sat(half);
-ushort __ovld __cnfn convert_ushort_sat_rte(half);
-ushort __ovld __cnfn convert_ushort_sat_rtp(half);
-ushort __ovld __cnfn convert_ushort_sat_rtn(half);
-ushort __ovld __cnfn convert_ushort_sat_rtz(half);
-ushort2 __ovld __cnfn convert_ushort2(half2);
-ushort2 __ovld __cnfn convert_ushort2_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_rtz(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2);
-ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2);
-ushort3 __ovld __cnfn convert_ushort3(half3);
-ushort3 __ovld __cnfn convert_ushort3_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_rtz(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3);
-ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3);
-ushort4 __ovld __cnfn convert_ushort4(half4);
-ushort4 __ovld __cnfn convert_ushort4_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_rtz(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4);
-ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4);
-ushort8 __ovld __cnfn convert_ushort8(half8);
-ushort8 __ovld __cnfn convert_ushort8_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_rtz(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8);
-ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8);
-ushort16 __ovld __cnfn convert_ushort16(half16);
-ushort16 __ovld __cnfn convert_ushort16_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_rtz(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16);
-ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16);
-uint __ovld __cnfn convert_uint(half);
-uint __ovld __cnfn convert_uint_rte(half);
-uint __ovld __cnfn convert_uint_rtp(half);
-uint __ovld __cnfn convert_uint_rtn(half);
-uint __ovld __cnfn convert_uint_rtz(half);
-uint __ovld __cnfn convert_uint_sat(half);
-uint __ovld __cnfn convert_uint_sat_rte(half);
-uint __ovld __cnfn convert_uint_sat_rtp(half);
-uint __ovld __cnfn convert_uint_sat_rtn(half);
-uint __ovld __cnfn convert_uint_sat_rtz(half);
-uint2 __ovld __cnfn convert_uint2(half2);
-uint2 __ovld __cnfn convert_uint2_rte(half2);
-uint2 __ovld __cnfn convert_uint2_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_rtz(half2);
-uint2 __ovld __cnfn convert_uint2_sat(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rte(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtp(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtn(half2);
-uint2 __ovld __cnfn convert_uint2_sat_rtz(half2);
-uint3 __ovld __cnfn convert_uint3(half3);
-uint3 __ovld __cnfn convert_uint3_rte(half3);
-uint3 __ovld __cnfn convert_uint3_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_rtz(half3);
-uint3 __ovld __cnfn convert_uint3_sat(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rte(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtp(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtn(half3);
-uint3 __ovld __cnfn convert_uint3_sat_rtz(half3);
-uint4 __ovld __cnfn convert_uint4(half4);
-uint4 __ovld __cnfn convert_uint4_rte(half4);
-uint4 __ovld __cnfn convert_uint4_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_rtz(half4);
-uint4 __ovld __cnfn convert_uint4_sat(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rte(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtp(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtn(half4);
-uint4 __ovld __cnfn convert_uint4_sat_rtz(half4);
-uint8 __ovld __cnfn convert_uint8(half8);
-uint8 __ovld __cnfn convert_uint8_rte(half8);
-uint8 __ovld __cnfn convert_uint8_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_rtz(half8);
-uint8 __ovld __cnfn convert_uint8_sat(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rte(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtp(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtn(half8);
-uint8 __ovld __cnfn convert_uint8_sat_rtz(half8);
-uint16 __ovld __cnfn convert_uint16(half16);
-uint16 __ovld __cnfn convert_uint16_rte(half16);
-uint16 __ovld __cnfn convert_uint16_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_rtz(half16);
-uint16 __ovld __cnfn convert_uint16_sat(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rte(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtp(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtn(half16);
-uint16 __ovld __cnfn convert_uint16_sat_rtz(half16);
-ulong __ovld __cnfn convert_ulong(half);
-ulong __ovld __cnfn convert_ulong_rte(half);
-ulong __ovld __cnfn convert_ulong_rtp(half);
-ulong __ovld __cnfn convert_ulong_rtn(half);
-ulong __ovld __cnfn convert_ulong_rtz(half);
-ulong __ovld __cnfn convert_ulong_sat(half);
-ulong __ovld __cnfn convert_ulong_sat_rte(half);
-ulong __ovld __cnfn convert_ulong_sat_rtp(half);
-ulong __ovld __cnfn convert_ulong_sat_rtn(half);
-ulong __ovld __cnfn convert_ulong_sat_rtz(half);
-ulong2 __ovld __cnfn convert_ulong2(half2);
-ulong2 __ovld __cnfn convert_ulong2_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_rtz(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtn(half2);
-ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2);
-ulong3 __ovld __cnfn convert_ulong3(half3);
-ulong3 __ovld __cnfn convert_ulong3_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_rtz(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3);
-ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3);
-ulong4 __ovld __cnfn convert_ulong4(half4);
-ulong4 __ovld __cnfn convert_ulong4_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_rtz(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4);
-ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4);
-ulong8 __ovld __cnfn convert_ulong8(half8);
-ulong8 __ovld __cnfn convert_ulong8_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_rtz(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8);
-ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8);
-ulong16 __ovld __cnfn convert_ulong16(half16);
-ulong16 __ovld __cnfn convert_ulong16_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_rtz(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16);
-ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16);
-char __ovld __cnfn convert_char(half);
-char __ovld __cnfn convert_char_rte(half);
-char __ovld __cnfn convert_char_rtp(half);
-char __ovld __cnfn convert_char_rtn(half);
-char __ovld __cnfn convert_char_rtz(half);
-char __ovld __cnfn convert_char_sat(half);
-char __ovld __cnfn convert_char_sat_rte(half);
-char __ovld __cnfn convert_char_sat_rtp(half);
-char __ovld __cnfn convert_char_sat_rtn(half);
-char __ovld __cnfn convert_char_sat_rtz(half);
-char2 __ovld __cnfn convert_char2(half2);
-char2 __ovld __cnfn convert_char2_rte(half2);
-char2 __ovld __cnfn convert_char2_rtp(half2);
-char2 __ovld __cnfn convert_char2_rtn(half2);
-char2 __ovld __cnfn convert_char2_rtz(half2);
-char2 __ovld __cnfn convert_char2_sat(half2);
-char2 __ovld __cnfn convert_char2_sat_rte(half2);
-char2 __ovld __cnfn convert_char2_sat_rtp(half2);
-char2 __ovld __cnfn convert_char2_sat_rtn(half2);
-char2 __ovld __cnfn convert_char2_sat_rtz(half2);
-char3 __ovld __cnfn convert_char3(half3);
-char3 __ovld __cnfn convert_char3_rte(half3);
-char3 __ovld __cnfn convert_char3_rtp(half3);
-char3 __ovld __cnfn convert_char3_rtn(half3);
-char3 __ovld __cnfn convert_char3_rtz(half3);
-char3 __ovld __cnfn convert_char3_sat(half3);
-char3 __ovld __cnfn convert_char3_sat_rte(half3);
-char3 __ovld __cnfn convert_char3_sat_rtp(half3);
-char3 __ovld __cnfn convert_char3_sat_rtn(half3);
-char3 __ovld __cnfn convert_char3_sat_rtz(half3);
-char4 __ovld __cnfn convert_char4(half4);
-char4 __ovld __cnfn convert_char4_rte(half4);
-char4 __ovld __cnfn convert_char4_rtp(half4);
-char4 __ovld __cnfn convert_char4_rtn(half4);
-char4 __ovld __cnfn convert_char4_rtz(half4);
-char4 __ovld __cnfn convert_char4_sat(half4);
-char4 __ovld __cnfn convert_char4_sat_rte(half4);
-char4 __ovld __cnfn convert_char4_sat_rtp(half4);
-char4 __ovld __cnfn convert_char4_sat_rtn(half4);
-char4 __ovld __cnfn convert_char4_sat_rtz(half4);
-char8 __ovld __cnfn convert_char8(half8);
-char8 __ovld __cnfn convert_char8_rte(half8);
-char8 __ovld __cnfn convert_char8_rtp(half8);
-char8 __ovld __cnfn convert_char8_rtn(half8);
-char8 __ovld __cnfn convert_char8_rtz(half8);
-char8 __ovld __cnfn convert_char8_sat(half8);
-char8 __ovld __cnfn convert_char8_sat_rte(half8);
-char8 __ovld __cnfn convert_char8_sat_rtp(half8);
-char8 __ovld __cnfn convert_char8_sat_rtn(half8);
-char8 __ovld __cnfn convert_char8_sat_rtz(half8);
-char16 __ovld __cnfn convert_char16(half16);
-char16 __ovld __cnfn convert_char16_rte(half16);
-char16 __ovld __cnfn convert_char16_rtp(half16);
-char16 __ovld __cnfn convert_char16_rtn(half16);
-char16 __ovld __cnfn convert_char16_rtz(half16);
-char16 __ovld __cnfn convert_char16_sat(half16);
-char16 __ovld __cnfn convert_char16_sat_rte(half16);
-char16 __ovld __cnfn convert_char16_sat_rtp(half16);
-char16 __ovld __cnfn convert_char16_sat_rtn(half16);
-char16 __ovld __cnfn convert_char16_sat_rtz(half16);
-short __ovld __cnfn convert_short(half);
-short __ovld __cnfn convert_short_rte(half);
-short __ovld __cnfn convert_short_rtp(half);
-short __ovld __cnfn convert_short_rtn(half);
-short __ovld __cnfn convert_short_rtz(half);
-short __ovld __cnfn convert_short_sat(half);
-short __ovld __cnfn convert_short_sat_rte(half);
-short __ovld __cnfn convert_short_sat_rtp(half);
-short __ovld __cnfn convert_short_sat_rtn(half);
-short __ovld __cnfn convert_short_sat_rtz(half);
-short2 __ovld __cnfn convert_short2(half2);
-short2 __ovld __cnfn convert_short2_rte(half2);
-short2 __ovld __cnfn convert_short2_rtp(half2);
-short2 __ovld __cnfn convert_short2_rtn(half2);
-short2 __ovld __cnfn convert_short2_rtz(half2);
-short2 __ovld __cnfn convert_short2_sat(half2);
-short2 __ovld __cnfn convert_short2_sat_rte(half2);
-short2 __ovld __cnfn convert_short2_sat_rtp(half2);
-short2 __ovld __cnfn convert_short2_sat_rtn(half2);
-short2 __ovld __cnfn convert_short2_sat_rtz(half2);
-short3 __ovld __cnfn convert_short3(half3);
-short3 __ovld __cnfn convert_short3_rte(half3);
-short3 __ovld __cnfn convert_short3_rtp(half3);
-short3 __ovld __cnfn convert_short3_rtn(half3);
-short3 __ovld __cnfn convert_short3_rtz(half3);
-short3 __ovld __cnfn convert_short3_sat(half3);
-short3 __ovld __cnfn convert_short3_sat_rte(half3);
-short3 __ovld __cnfn convert_short3_sat_rtp(half3);
-short3 __ovld __cnfn convert_short3_sat_rtn(half3);
-short3 __ovld __cnfn convert_short3_sat_rtz(half3);
-short4 __ovld __cnfn convert_short4(half4);
-short4 __ovld __cnfn convert_short4_rte(half4);
-short4 __ovld __cnfn convert_short4_rtp(half4);
-short4 __ovld __cnfn convert_short4_rtn(half4);
-short4 __ovld __cnfn convert_short4_rtz(half4);
-short4 __ovld __cnfn convert_short4_sat(half4);
-short4 __ovld __cnfn convert_short4_sat_rte(half4);
-short4 __ovld __cnfn convert_short4_sat_rtp(half4);
-short4 __ovld __cnfn convert_short4_sat_rtn(half4);
-short4 __ovld __cnfn convert_short4_sat_rtz(half4);
-short8 __ovld __cnfn convert_short8(half8);
-short8 __ovld __cnfn convert_short8_rte(half8);
-short8 __ovld __cnfn convert_short8_rtp(half8);
-short8 __ovld __cnfn convert_short8_rtn(half8);
-short8 __ovld __cnfn convert_short8_rtz(half8);
-short8 __ovld __cnfn convert_short8_sat(half8);
-short8 __ovld __cnfn convert_short8_sat_rte(half8);
-short8 __ovld __cnfn convert_short8_sat_rtp(half8);
-short8 __ovld __cnfn convert_short8_sat_rtn(half8);
-short8 __ovld __cnfn convert_short8_sat_rtz(half8);
-short16 __ovld __cnfn convert_short16(half16);
-short16 __ovld __cnfn convert_short16_rte(half16);
-short16 __ovld __cnfn convert_short16_rtp(half16);
-short16 __ovld __cnfn convert_short16_rtn(half16);
-short16 __ovld __cnfn convert_short16_rtz(half16);
-short16 __ovld __cnfn convert_short16_sat(half16);
-short16 __ovld __cnfn convert_short16_sat_rte(half16);
-short16 __ovld __cnfn convert_short16_sat_rtp(half16);
-short16 __ovld __cnfn convert_short16_sat_rtn(half16);
-short16 __ovld __cnfn convert_short16_sat_rtz(half16);
-int __ovld __cnfn convert_int(half);
-int __ovld __cnfn convert_int_rte(half);
-int __ovld __cnfn convert_int_rtp(half);
-int __ovld __cnfn convert_int_rtn(half);
-int __ovld __cnfn convert_int_rtz(half);
-int __ovld __cnfn convert_int_sat(half);
-int __ovld __cnfn convert_int_sat_rte(half);
-int __ovld __cnfn convert_int_sat_rtp(half);
-int __ovld __cnfn convert_int_sat_rtn(half);
-int __ovld __cnfn convert_int_sat_rtz(half);
-int2 __ovld __cnfn convert_int2(half2);
-int2 __ovld __cnfn convert_int2_rte(half2);
-int2 __ovld __cnfn convert_int2_rtp(half2);
-int2 __ovld __cnfn convert_int2_rtn(half2);
-int2 __ovld __cnfn convert_int2_rtz(half2);
-int2 __ovld __cnfn convert_int2_sat(half2);
-int2 __ovld __cnfn convert_int2_sat_rte(half2);
-int2 __ovld __cnfn convert_int2_sat_rtp(half2);
-int2 __ovld __cnfn convert_int2_sat_rtn(half2);
-int2 __ovld __cnfn convert_int2_sat_rtz(half2);
-int3 __ovld __cnfn convert_int3(half3);
-int3 __ovld __cnfn convert_int3_rte(half3);
-int3 __ovld __cnfn convert_int3_rtp(half3);
-int3 __ovld __cnfn convert_int3_rtn(half3);
-int3 __ovld __cnfn convert_int3_rtz(half3);
-int3 __ovld __cnfn convert_int3_sat(half3);
-int3 __ovld __cnfn convert_int3_sat_rte(half3);
-int3 __ovld __cnfn convert_int3_sat_rtp(half3);
-int3 __ovld __cnfn convert_int3_sat_rtn(half3);
-int3 __ovld __cnfn convert_int3_sat_rtz(half3);
-int4 __ovld __cnfn convert_int4(half4);
-int4 __ovld __cnfn convert_int4_rte(half4);
-int4 __ovld __cnfn convert_int4_rtp(half4);
-int4 __ovld __cnfn convert_int4_rtn(half4);
-int4 __ovld __cnfn convert_int4_rtz(half4);
-int4 __ovld __cnfn convert_int4_sat(half4);
-int4 __ovld __cnfn convert_int4_sat_rte(half4);
-int4 __ovld __cnfn convert_int4_sat_rtp(half4);
-int4 __ovld __cnfn convert_int4_sat_rtn(half4);
-int4 __ovld __cnfn convert_int4_sat_rtz(half4);
-int8 __ovld __cnfn convert_int8(half8);
-int8 __ovld __cnfn convert_int8_rte(half8);
-int8 __ovld __cnfn convert_int8_rtp(half8);
-int8 __ovld __cnfn convert_int8_rtn(half8);
-int8 __ovld __cnfn convert_int8_rtz(half8);
-int8 __ovld __cnfn convert_int8_sat(half8);
-int8 __ovld __cnfn convert_int8_sat_rte(half8);
-int8 __ovld __cnfn convert_int8_sat_rtp(half8);
-int8 __ovld __cnfn convert_int8_sat_rtn(half8);
-int8 __ovld __cnfn convert_int8_sat_rtz(half8);
-int16 __ovld __cnfn convert_int16(half16);
-int16 __ovld __cnfn convert_int16_rte(half16);
-int16 __ovld __cnfn convert_int16_rtp(half16);
-int16 __ovld __cnfn convert_int16_rtn(half16);
-int16 __ovld __cnfn convert_int16_rtz(half16);
-int16 __ovld __cnfn convert_int16_sat(half16);
-int16 __ovld __cnfn convert_int16_sat_rte(half16);
-int16 __ovld __cnfn convert_int16_sat_rtp(half16);
-int16 __ovld __cnfn convert_int16_sat_rtn(half16);
-int16 __ovld __cnfn convert_int16_sat_rtz(half16);
-long __ovld __cnfn convert_long(half);
-long __ovld __cnfn convert_long_rte(half);
-long __ovld __cnfn convert_long_rtp(half);
-long __ovld __cnfn convert_long_rtn(half);
-long __ovld __cnfn convert_long_rtz(half);
-long __ovld __cnfn convert_long_sat(half);
-long __ovld __cnfn convert_long_sat_rte(half);
-long __ovld __cnfn convert_long_sat_rtp(half);
-long __ovld __cnfn convert_long_sat_rtn(half);
-long __ovld __cnfn convert_long_sat_rtz(half);
-long2 __ovld __cnfn convert_long2(half2);
-long2 __ovld __cnfn convert_long2_rte(half2);
-long2 __ovld __cnfn convert_long2_rtp(half2);
-long2 __ovld __cnfn convert_long2_rtn(half2);
-long2 __ovld __cnfn convert_long2_rtz(half2);
-long2 __ovld __cnfn convert_long2_sat(half2);
-long2 __ovld __cnfn convert_long2_sat_rte(half2);
-long2 __ovld __cnfn convert_long2_sat_rtp(half2);
-long2 __ovld __cnfn convert_long2_sat_rtn(half2);
-long2 __ovld __cnfn convert_long2_sat_rtz(half2);
-long3 __ovld __cnfn convert_long3(half3);
-long3 __ovld __cnfn convert_long3_rte(half3);
-long3 __ovld __cnfn convert_long3_rtp(half3);
-long3 __ovld __cnfn convert_long3_rtn(half3);
-long3 __ovld __cnfn convert_long3_rtz(half3);
-long3 __ovld __cnfn convert_long3_sat(half3);
-long3 __ovld __cnfn convert_long3_sat_rte(half3);
-long3 __ovld __cnfn convert_long3_sat_rtp(half3);
-long3 __ovld __cnfn convert_long3_sat_rtn(half3);
-long3 __ovld __cnfn convert_long3_sat_rtz(half3);
-long4 __ovld __cnfn convert_long4(half4);
-long4 __ovld __cnfn convert_long4_rte(half4);
-long4 __ovld __cnfn convert_long4_rtp(half4);
-long4 __ovld __cnfn convert_long4_rtn(half4);
-long4 __ovld __cnfn convert_long4_rtz(half4);
-long4 __ovld __cnfn convert_long4_sat(half4);
-long4 __ovld __cnfn convert_long4_sat_rte(half4);
-long4 __ovld __cnfn convert_long4_sat_rtp(half4);
-long4 __ovld __cnfn convert_long4_sat_rtn(half4);
-long4 __ovld __cnfn convert_long4_sat_rtz(half4);
-long8 __ovld __cnfn convert_long8(half8);
-long8 __ovld __cnfn convert_long8_rte(half8);
-long8 __ovld __cnfn convert_long8_rtp(half8);
-long8 __ovld __cnfn convert_long8_rtn(half8);
-long8 __ovld __cnfn convert_long8_rtz(half8);
-long8 __ovld __cnfn convert_long8_sat(half8);
-long8 __ovld __cnfn convert_long8_sat_rte(half8);
-long8 __ovld __cnfn convert_long8_sat_rtp(half8);
-long8 __ovld __cnfn convert_long8_sat_rtn(half8);
-long8 __ovld __cnfn convert_long8_sat_rtz(half8);
-long16 __ovld __cnfn convert_long16(half16);
-long16 __ovld __cnfn convert_long16_rte(half16);
-long16 __ovld __cnfn convert_long16_rtp(half16);
-long16 __ovld __cnfn convert_long16_rtn(half16);
-long16 __ovld __cnfn convert_long16_rtz(half16);
-long16 __ovld __cnfn convert_long16_sat(half16);
-long16 __ovld __cnfn convert_long16_sat_rte(half16);
-long16 __ovld __cnfn convert_long16_sat_rtp(half16);
-long16 __ovld __cnfn convert_long16_sat_rtn(half16);
-long16 __ovld __cnfn convert_long16_sat_rtz(half16);
-float __ovld __cnfn convert_float(half);
-float __ovld __cnfn convert_float_rte(half);
-float __ovld __cnfn convert_float_rtp(half);
-float __ovld __cnfn convert_float_rtn(half);
-float __ovld __cnfn convert_float_rtz(half);
-float2 __ovld __cnfn convert_float2(half2);
-float2 __ovld __cnfn convert_float2_rte(half2);
-float2 __ovld __cnfn convert_float2_rtp(half2);
-float2 __ovld __cnfn convert_float2_rtn(half2);
-float2 __ovld __cnfn convert_float2_rtz(half2);
-float3 __ovld __cnfn convert_float3(half3);
-float3 __ovld __cnfn convert_float3_rte(half3);
-float3 __ovld __cnfn convert_float3_rtp(half3);
-float3 __ovld __cnfn convert_float3_rtn(half3);
-float3 __ovld __cnfn convert_float3_rtz(half3);
-float4 __ovld __cnfn convert_float4(half4);
-float4 __ovld __cnfn convert_float4_rte(half4);
-float4 __ovld __cnfn convert_float4_rtp(half4);
-float4 __ovld __cnfn convert_float4_rtn(half4);
-float4 __ovld __cnfn convert_float4_rtz(half4);
-float8 __ovld __cnfn convert_float8(half8);
-float8 __ovld __cnfn convert_float8_rte(half8);
-float8 __ovld __cnfn convert_float8_rtp(half8);
-float8 __ovld __cnfn convert_float8_rtn(half8);
-float8 __ovld __cnfn convert_float8_rtz(half8);
-float16 __ovld __cnfn convert_float16(half16);
-float16 __ovld __cnfn convert_float16_rte(half16);
-float16 __ovld __cnfn convert_float16_rtp(half16);
-float16 __ovld __cnfn convert_float16_rtn(half16);
-float16 __ovld __cnfn convert_float16_rtz(half16);
-
-// Convert non-double types to half types.
-half __ovld __cnfn convert_half(uchar);
-half __ovld __cnfn convert_half(ushort);
-half __ovld __cnfn convert_half(uint);
-half __ovld __cnfn convert_half(ulong);
-half __ovld __cnfn convert_half(char);
-half __ovld __cnfn convert_half(short);
-half __ovld __cnfn convert_half(int);
-half __ovld __cnfn convert_half(long);
-half __ovld __cnfn convert_half(float);
-half __ovld __cnfn convert_half(half);
-half __ovld __cnfn convert_half_rte(uchar);
-half __ovld __cnfn convert_half_rte(ushort);
-half __ovld __cnfn convert_half_rte(uint);
-half __ovld __cnfn convert_half_rte(ulong);
-half __ovld __cnfn convert_half_rte(char);
-half __ovld __cnfn convert_half_rte(short);
-half __ovld __cnfn convert_half_rte(int);
-half __ovld __cnfn convert_half_rte(long);
-half __ovld __cnfn convert_half_rte(float);
-half __ovld __cnfn convert_half_rte(half);
-half __ovld __cnfn convert_half_rtp(uchar);
-half __ovld __cnfn convert_half_rtp(ushort);
-half __ovld __cnfn convert_half_rtp(uint);
-half __ovld __cnfn convert_half_rtp(ulong);
-half __ovld __cnfn convert_half_rtp(char);
-half __ovld __cnfn convert_half_rtp(short);
-half __ovld __cnfn convert_half_rtp(int);
-half __ovld __cnfn convert_half_rtp(long);
-half __ovld __cnfn convert_half_rtp(float);
-half __ovld __cnfn convert_half_rtp(half);
-half __ovld __cnfn convert_half_rtn(uchar);
-half __ovld __cnfn convert_half_rtn(ushort);
-half __ovld __cnfn convert_half_rtn(uint);
-half __ovld __cnfn convert_half_rtn(ulong);
-half __ovld __cnfn convert_half_rtn(char);
-half __ovld __cnfn convert_half_rtn(short);
-half __ovld __cnfn convert_half_rtn(int);
-half __ovld __cnfn convert_half_rtn(long);
-half __ovld __cnfn convert_half_rtn(float);
-half __ovld __cnfn convert_half_rtn(half);
-half __ovld __cnfn convert_half_rtz(uchar);
-half __ovld __cnfn convert_half_rtz(ushort);
-half __ovld __cnfn convert_half_rtz(uint);
-half __ovld __cnfn convert_half_rtz(ulong);
-half __ovld __cnfn convert_half_rtz(char);
-half __ovld __cnfn convert_half_rtz(short);
-half __ovld __cnfn convert_half_rtz(int);
-half __ovld __cnfn convert_half_rtz(long);
-half __ovld __cnfn convert_half_rtz(float);
-half __ovld __cnfn convert_half_rtz(half);
-half2 __ovld __cnfn convert_half2(char2);
-half2 __ovld __cnfn convert_half2(uchar2);
-half2 __ovld __cnfn convert_half2(short2);
-half2 __ovld __cnfn convert_half2(ushort2);
-half2 __ovld __cnfn convert_half2(int2);
-half2 __ovld __cnfn convert_half2(uint2);
-half2 __ovld __cnfn convert_half2(long2);
-half2 __ovld __cnfn convert_half2(ulong2);
-half2 __ovld __cnfn convert_half2(float2);
-half2 __ovld __cnfn convert_half2(half2);
-half2 __ovld __cnfn convert_half2_rte(char2);
-half2 __ovld __cnfn convert_half2_rte(uchar2);
-half2 __ovld __cnfn convert_half2_rte(short2);
-half2 __ovld __cnfn convert_half2_rte(ushort2);
-half2 __ovld __cnfn convert_half2_rte(int2);
-half2 __ovld __cnfn convert_half2_rte(uint2);
-half2 __ovld __cnfn convert_half2_rte(long2);
-half2 __ovld __cnfn convert_half2_rte(ulong2);
-half2 __ovld __cnfn convert_half2_rte(float2);
-half2 __ovld __cnfn convert_half2_rte(half2);
-half2 __ovld __cnfn convert_half2_rtp(char2);
-half2 __ovld __cnfn convert_half2_rtp(uchar2);
-half2 __ovld __cnfn convert_half2_rtp(short2);
-half2 __ovld __cnfn convert_half2_rtp(ushort2);
-half2 __ovld __cnfn convert_half2_rtp(int2);
-half2 __ovld __cnfn convert_half2_rtp(uint2);
-half2 __ovld __cnfn convert_half2_rtp(long2);
-half2 __ovld __cnfn convert_half2_rtp(ulong2);
-half2 __ovld __cnfn convert_half2_rtp(float2);
-half2 __ovld __cnfn convert_half2_rtp(half2);
-half2 __ovld __cnfn convert_half2_rtn(char2);
-half2 __ovld __cnfn convert_half2_rtn(uchar2);
-half2 __ovld __cnfn convert_half2_rtn(short2);
-half2 __ovld __cnfn convert_half2_rtn(ushort2);
-half2 __ovld __cnfn convert_half2_rtn(int2);
-half2 __ovld __cnfn convert_half2_rtn(uint2);
-half2 __ovld __cnfn convert_half2_rtn(long2);
-half2 __ovld __cnfn convert_half2_rtn(ulong2);
-half2 __ovld __cnfn convert_half2_rtn(float2);
-half2 __ovld __cnfn convert_half2_rtn(half2);
-half2 __ovld __cnfn convert_half2_rtz(char2);
-half2 __ovld __cnfn convert_half2_rtz(uchar2);
-half2 __ovld __cnfn convert_half2_rtz(short2);
-half2 __ovld __cnfn convert_half2_rtz(ushort2);
-half2 __ovld __cnfn convert_half2_rtz(int2);
-half2 __ovld __cnfn convert_half2_rtz(uint2);
-half2 __ovld __cnfn convert_half2_rtz(long2);
-half2 __ovld __cnfn convert_half2_rtz(ulong2);
-half2 __ovld __cnfn convert_half2_rtz(float2);
-half2 __ovld __cnfn convert_half2_rtz(half2);
-half3 __ovld __cnfn convert_half3(char3);
-half3 __ovld __cnfn convert_half3(uchar3);
-half3 __ovld __cnfn convert_half3(short3);
-half3 __ovld __cnfn convert_half3(ushort3);
-half3 __ovld __cnfn convert_half3(int3);
-half3 __ovld __cnfn convert_half3(uint3);
-half3 __ovld __cnfn convert_half3(long3);
-half3 __ovld __cnfn convert_half3(ulong3);
-half3 __ovld __cnfn convert_half3(float3);
-half3 __ovld __cnfn convert_half3(half3);
-half3 __ovld __cnfn convert_half3_rte(char3);
-half3 __ovld __cnfn convert_half3_rte(uchar3);
-half3 __ovld __cnfn convert_half3_rte(short3);
-half3 __ovld __cnfn convert_half3_rte(ushort3);
-half3 __ovld __cnfn convert_half3_rte(int3);
-half3 __ovld __cnfn convert_half3_rte(uint3);
-half3 __ovld __cnfn convert_half3_rte(long3);
-half3 __ovld __cnfn convert_half3_rte(ulong3);
-half3 __ovld __cnfn convert_half3_rte(float3);
-half3 __ovld __cnfn convert_half3_rte(half3);
-half3 __ovld __cnfn convert_half3_rtp(char3);
-half3 __ovld __cnfn convert_half3_rtp(uchar3);
-half3 __ovld __cnfn convert_half3_rtp(short3);
-half3 __ovld __cnfn convert_half3_rtp(ushort3);
-half3 __ovld __cnfn convert_half3_rtp(int3);
-half3 __ovld __cnfn convert_half3_rtp(uint3);
-half3 __ovld __cnfn convert_half3_rtp(long3);
-half3 __ovld __cnfn convert_half3_rtp(ulong3);
-half3 __ovld __cnfn convert_half3_rtp(float3);
-half3 __ovld __cnfn convert_half3_rtp(half3);
-half3 __ovld __cnfn convert_half3_rtn(char3);
-half3 __ovld __cnfn convert_half3_rtn(uchar3);
-half3 __ovld __cnfn convert_half3_rtn(short3);
-half3 __ovld __cnfn convert_half3_rtn(ushort3);
-half3 __ovld __cnfn convert_half3_rtn(int3);
-half3 __ovld __cnfn convert_half3_rtn(uint3);
-half3 __ovld __cnfn convert_half3_rtn(long3);
-half3 __ovld __cnfn convert_half3_rtn(ulong3);
-half3 __ovld __cnfn convert_half3_rtn(float3);
-half3 __ovld __cnfn convert_half3_rtn(half3);
-half3 __ovld __cnfn convert_half3_rtz(char3);
-half3 __ovld __cnfn convert_half3_rtz(uchar3);
-half3 __ovld __cnfn convert_half3_rtz(short3);
-half3 __ovld __cnfn convert_half3_rtz(ushort3);
-half3 __ovld __cnfn convert_half3_rtz(int3);
-half3 __ovld __cnfn convert_half3_rtz(uint3);
-half3 __ovld __cnfn convert_half3_rtz(long3);
-half3 __ovld __cnfn convert_half3_rtz(ulong3);
-half3 __ovld __cnfn convert_half3_rtz(float3);
-half3 __ovld __cnfn convert_half3_rtz(half3);
-half4 __ovld __cnfn convert_half4(char4);
-half4 __ovld __cnfn convert_half4(uchar4);
-half4 __ovld __cnfn convert_half4(short4);
-half4 __ovld __cnfn convert_half4(ushort4);
-half4 __ovld __cnfn convert_half4(int4);
-half4 __ovld __cnfn convert_half4(uint4);
-half4 __ovld __cnfn convert_half4(long4);
-half4 __ovld __cnfn convert_half4(ulong4);
-half4 __ovld __cnfn convert_half4(float4);
-half4 __ovld __cnfn convert_half4(half4);
-half4 __ovld __cnfn convert_half4_rte(char4);
-half4 __ovld __cnfn convert_half4_rte(uchar4);
-half4 __ovld __cnfn convert_half4_rte(short4);
-half4 __ovld __cnfn convert_half4_rte(ushort4);
-half4 __ovld __cnfn convert_half4_rte(int4);
-half4 __ovld __cnfn convert_half4_rte(uint4);
-half4 __ovld __cnfn convert_half4_rte(long4);
-half4 __ovld __cnfn convert_half4_rte(ulong4);
-half4 __ovld __cnfn convert_half4_rte(float4);
-half4 __ovld __cnfn convert_half4_rte(half4);
-half4 __ovld __cnfn convert_half4_rtp(char4);
-half4 __ovld __cnfn convert_half4_rtp(uchar4);
-half4 __ovld __cnfn convert_half4_rtp(short4);
-half4 __ovld __cnfn convert_half4_rtp(ushort4);
-half4 __ovld __cnfn convert_half4_rtp(int4);
-half4 __ovld __cnfn convert_half4_rtp(uint4);
-half4 __ovld __cnfn convert_half4_rtp(long4);
-half4 __ovld __cnfn convert_half4_rtp(ulong4);
-half4 __ovld __cnfn convert_half4_rtp(float4);
-half4 __ovld __cnfn convert_half4_rtp(half4);
-half4 __ovld __cnfn convert_half4_rtn(char4);
-half4 __ovld __cnfn convert_half4_rtn(uchar4);
-half4 __ovld __cnfn convert_half4_rtn(short4);
-half4 __ovld __cnfn convert_half4_rtn(ushort4);
-half4 __ovld __cnfn convert_half4_rtn(int4);
-half4 __ovld __cnfn convert_half4_rtn(uint4);
-half4 __ovld __cnfn convert_half4_rtn(long4);
-half4 __ovld __cnfn convert_half4_rtn(ulong4);
-half4 __ovld __cnfn convert_half4_rtn(float4);
-half4 __ovld __cnfn convert_half4_rtn(half4);
-half4 __ovld __cnfn convert_half4_rtz(char4);
-half4 __ovld __cnfn convert_half4_rtz(uchar4);
-half4 __ovld __cnfn convert_half4_rtz(short4);
-half4 __ovld __cnfn convert_half4_rtz(ushort4);
-half4 __ovld __cnfn convert_half4_rtz(int4);
-half4 __ovld __cnfn convert_half4_rtz(uint4);
-half4 __ovld __cnfn convert_half4_rtz(long4);
-half4 __ovld __cnfn convert_half4_rtz(ulong4);
-half4 __ovld __cnfn convert_half4_rtz(float4);
-half4 __ovld __cnfn convert_half4_rtz(half4);
-half8 __ovld __cnfn convert_half8(char8);
-half8 __ovld __cnfn convert_half8(uchar8);
-half8 __ovld __cnfn convert_half8(short8);
-half8 __ovld __cnfn convert_half8(ushort8);
-half8 __ovld __cnfn convert_half8(int8);
-half8 __ovld __cnfn convert_half8(uint8);
-half8 __ovld __cnfn convert_half8(long8);
-half8 __ovld __cnfn convert_half8(ulong8);
-half8 __ovld __cnfn convert_half8(float8);
-half8 __ovld __cnfn convert_half8(half8);
-half8 __ovld __cnfn convert_half8_rte(char8);
-half8 __ovld __cnfn convert_half8_rte(uchar8);
-half8 __ovld __cnfn convert_half8_rte(short8);
-half8 __ovld __cnfn convert_half8_rte(ushort8);
-half8 __ovld __cnfn convert_half8_rte(int8);
-half8 __ovld __cnfn convert_half8_rte(uint8);
-half8 __ovld __cnfn convert_half8_rte(long8);
-half8 __ovld __cnfn convert_half8_rte(ulong8);
-half8 __ovld __cnfn convert_half8_rte(float8);
-half8 __ovld __cnfn convert_half8_rte(half8);
-half8 __ovld __cnfn convert_half8_rtp(char8);
-half8 __ovld __cnfn convert_half8_rtp(uchar8);
-half8 __ovld __cnfn convert_half8_rtp(short8);
-half8 __ovld __cnfn convert_half8_rtp(ushort8);
-half8 __ovld __cnfn convert_half8_rtp(int8);
-half8 __ovld __cnfn convert_half8_rtp(uint8);
-half8 __ovld __cnfn convert_half8_rtp(long8);
-half8 __ovld __cnfn convert_half8_rtp(ulong8);
-half8 __ovld __cnfn convert_half8_rtp(float8);
-half8 __ovld __cnfn convert_half8_rtp(half8);
-half8 __ovld __cnfn convert_half8_rtn(char8);
-half8 __ovld __cnfn convert_half8_rtn(uchar8);
-half8 __ovld __cnfn convert_half8_rtn(short8);
-half8 __ovld __cnfn convert_half8_rtn(ushort8);
-half8 __ovld __cnfn convert_half8_rtn(int8);
-half8 __ovld __cnfn convert_half8_rtn(uint8);
-half8 __ovld __cnfn convert_half8_rtn(long8);
-half8 __ovld __cnfn convert_half8_rtn(ulong8);
-half8 __ovld __cnfn convert_half8_rtn(float8);
-half8 __ovld __cnfn convert_half8_rtn(half8);
-half8 __ovld __cnfn convert_half8_rtz(char8);
-half8 __ovld __cnfn convert_half8_rtz(uchar8);
-half8 __ovld __cnfn convert_half8_rtz(short8);
-half8 __ovld __cnfn convert_half8_rtz(ushort8);
-half8 __ovld __cnfn convert_half8_rtz(int8);
-half8 __ovld __cnfn convert_half8_rtz(uint8);
-half8 __ovld __cnfn convert_half8_rtz(long8);
-half8 __ovld __cnfn convert_half8_rtz(ulong8);
-half8 __ovld __cnfn convert_half8_rtz(float8);
-half8 __ovld __cnfn convert_half8_rtz(half8);
-half16 __ovld __cnfn convert_half16(char16);
-half16 __ovld __cnfn convert_half16(uchar16);
-half16 __ovld __cnfn convert_half16(short16);
-half16 __ovld __cnfn convert_half16(ushort16);
-half16 __ovld __cnfn convert_half16(int16);
-half16 __ovld __cnfn convert_half16(uint16);
-half16 __ovld __cnfn convert_half16(long16);
-half16 __ovld __cnfn convert_half16(ulong16);
-half16 __ovld __cnfn convert_half16(float16);
-half16 __ovld __cnfn convert_half16(half16);
-half16 __ovld __cnfn convert_half16_rte(char16);
-half16 __ovld __cnfn convert_half16_rte(uchar16);
-half16 __ovld __cnfn convert_half16_rte(short16);
-half16 __ovld __cnfn convert_half16_rte(ushort16);
-half16 __ovld __cnfn convert_half16_rte(int16);
-half16 __ovld __cnfn convert_half16_rte(uint16);
-half16 __ovld __cnfn convert_half16_rte(long16);
-half16 __ovld __cnfn convert_half16_rte(ulong16);
-half16 __ovld __cnfn convert_half16_rte(float16);
-half16 __ovld __cnfn convert_half16_rte(half16);
-half16 __ovld __cnfn convert_half16_rtp(char16);
-half16 __ovld __cnfn convert_half16_rtp(uchar16);
-half16 __ovld __cnfn convert_half16_rtp(short16);
-half16 __ovld __cnfn convert_half16_rtp(ushort16);
-half16 __ovld __cnfn convert_half16_rtp(int16);
-half16 __ovld __cnfn convert_half16_rtp(uint16);
-half16 __ovld __cnfn convert_half16_rtp(long16);
-half16 __ovld __cnfn convert_half16_rtp(ulong16);
-half16 __ovld __cnfn convert_half16_rtp(float16);
-half16 __ovld __cnfn convert_half16_rtp(half16);
-half16 __ovld __cnfn convert_half16_rtn(char16);
-half16 __ovld __cnfn convert_half16_rtn(uchar16);
-half16 __ovld __cnfn convert_half16_rtn(short16);
-half16 __ovld __cnfn convert_half16_rtn(ushort16);
-half16 __ovld __cnfn convert_half16_rtn(int16);
-half16 __ovld __cnfn convert_half16_rtn(uint16);
-half16 __ovld __cnfn convert_half16_rtn(long16);
-half16 __ovld __cnfn convert_half16_rtn(ulong16);
-half16 __ovld __cnfn convert_half16_rtn(float16);
-half16 __ovld __cnfn convert_half16_rtn(half16);
-half16 __ovld __cnfn convert_half16_rtz(char16);
-half16 __ovld __cnfn convert_half16_rtz(uchar16);
-half16 __ovld __cnfn convert_half16_rtz(short16);
-half16 __ovld __cnfn convert_half16_rtz(ushort16);
-half16 __ovld __cnfn convert_half16_rtz(int16);
-half16 __ovld __cnfn convert_half16_rtz(uint16);
-half16 __ovld __cnfn convert_half16_rtz(long16);
-half16 __ovld __cnfn convert_half16_rtz(ulong16);
-half16 __ovld __cnfn convert_half16_rtz(float16);
-half16 __ovld __cnfn convert_half16_rtz(half16);
-
-// Convert half types to double types.
-#ifdef cl_khr_fp64
-double __ovld __cnfn convert_double(half);
-double __ovld __cnfn convert_double_rte(half);
-double __ovld __cnfn convert_double_rtp(half);
-double __ovld __cnfn convert_double_rtn(half);
-double __ovld __cnfn convert_double_rtz(half);
-double2 __ovld __cnfn convert_double2(half2);
-double2 __ovld __cnfn convert_double2_rte(half2);
-double2 __ovld __cnfn convert_double2_rtp(half2);
-double2 __ovld __cnfn convert_double2_rtn(half2);
-double2 __ovld __cnfn convert_double2_rtz(half2);
-double3 __ovld __cnfn convert_double3(half3);
-double3 __ovld __cnfn convert_double3_rte(half3);
-double3 __ovld __cnfn convert_double3_rtp(half3);
-double3 __ovld __cnfn convert_double3_rtn(half3);
-double3 __ovld __cnfn convert_double3_rtz(half3);
-double4 __ovld __cnfn convert_double4(half4);
-double4 __ovld __cnfn convert_double4_rte(half4);
-double4 __ovld __cnfn convert_double4_rtp(half4);
-double4 __ovld __cnfn convert_double4_rtn(half4);
-double4 __ovld __cnfn convert_double4_rtz(half4);
-double8 __ovld __cnfn convert_double8(half8);
-double8 __ovld __cnfn convert_double8_rte(half8);
-double8 __ovld __cnfn convert_double8_rtp(half8);
-double8 __ovld __cnfn convert_double8_rtn(half8);
-double8 __ovld __cnfn convert_double8_rtz(half8);
-double16 __ovld __cnfn convert_double16(half16);
-double16 __ovld __cnfn convert_double16_rte(half16);
-double16 __ovld __cnfn convert_double16_rtp(half16);
-double16 __ovld __cnfn convert_double16_rtn(half16);
-double16 __ovld __cnfn convert_double16_rtz(half16);
-
-// Convert double types to half types.
-half __ovld __cnfn convert_half(double);
-half __ovld __cnfn convert_half_rte(double);
-half __ovld __cnfn convert_half_rtp(double);
-half __ovld __cnfn convert_half_rtn(double);
-half __ovld __cnfn convert_half_rtz(double);
-half2 __ovld __cnfn convert_half2(double2);
-half2 __ovld __cnfn convert_half2_rte(double2);
-half2 __ovld __cnfn convert_half2_rtp(double2);
-half2 __ovld __cnfn convert_half2_rtn(double2);
-half2 __ovld __cnfn convert_half2_rtz(double2);
-half3 __ovld __cnfn convert_half3(double3);
-half3 __ovld __cnfn convert_half3_rte(double3);
-half3 __ovld __cnfn convert_half3_rtp(double3);
-half3 __ovld __cnfn convert_half3_rtn(double3);
-half3 __ovld __cnfn convert_half3_rtz(double3);
-half4 __ovld __cnfn convert_half4(double4);
-half4 __ovld __cnfn convert_half4_rte(double4);
-half4 __ovld __cnfn convert_half4_rtp(double4);
-half4 __ovld __cnfn convert_half4_rtn(double4);
-half4 __ovld __cnfn convert_half4_rtz(double4);
-half8 __ovld __cnfn convert_half8(double8);
-half8 __ovld __cnfn convert_half8_rte(double8);
-half8 __ovld __cnfn convert_half8_rtp(double8);
-half8 __ovld __cnfn convert_half8_rtn(double8);
-half8 __ovld __cnfn convert_half8_rtz(double8);
-half16 __ovld __cnfn convert_half16(double16);
-half16 __ovld __cnfn convert_half16_rte(double16);
-half16 __ovld __cnfn convert_half16_rtp(double16);
-half16 __ovld __cnfn convert_half16_rtn(double16);
-half16 __ovld __cnfn convert_half16_rtz(double16);
-#endif //cl_khr_fp64
-
-#endif // cl_khr_fp16
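
A note on the naming scheme above, for orientation: each conversion builtin follows convert_<destType><N>[_sat][_<rounding>], where the optional _sat suffix clamps out-of-range inputs to the destination range and the rounding suffix is one of _rte (to nearest even), _rtz (toward zero), _rtp (toward +infinity), _rtn (toward -infinity). A minimal sketch of a saturating half-to-uchar conversion, assuming a device that exposes cl_khr_fp16 (the kernel name and buffers are hypothetical):

#pragma OPENCL EXTENSION cl_khr_fp16 : enable
__kernel void convert_demo(__global const half4 *in, __global uchar4 *out) {
    size_t i = get_global_id(0);
    // Inputs below 0 clamp to 0 and above 255 clamp to 255; _rte rounds to
    // nearest even instead of the default round-toward-zero used for
    // float-to-integer conversions.
    out[i] = convert_uchar4_sat_rte(in[i]);
}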
-
-/**
- * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
- * Reinterprets a data type as another data type of the same size
- */
-#define as_char(x) __builtin_astype((x),   char)
-#define as_char2(x) __builtin_astype((x),  char2)
-#define as_char3(x) __builtin_astype((x),  char3)
-#define as_char4(x) __builtin_astype((x),  char4)
-#define as_char8(x) __builtin_astype((x),  char8)
-#define as_char16(x) __builtin_astype((x), char16)
-
-#define as_uchar(x) __builtin_astype((x),   uchar)
-#define as_uchar2(x) __builtin_astype((x),  uchar2)
-#define as_uchar3(x) __builtin_astype((x),  uchar3)
-#define as_uchar4(x) __builtin_astype((x),  uchar4)
-#define as_uchar8(x) __builtin_astype((x),  uchar8)
-#define as_uchar16(x) __builtin_astype((x), uchar16)
-
-#define as_short(x) __builtin_astype((x),   short)
-#define as_short2(x) __builtin_astype((x),  short2)
-#define as_short3(x) __builtin_astype((x),  short3)
-#define as_short4(x) __builtin_astype((x),  short4)
-#define as_short8(x) __builtin_astype((x),  short8)
-#define as_short16(x) __builtin_astype((x), short16)
-
-#define as_ushort(x) __builtin_astype((x),   ushort)
-#define as_ushort2(x) __builtin_astype((x),  ushort2)
-#define as_ushort3(x) __builtin_astype((x),  ushort3)
-#define as_ushort4(x) __builtin_astype((x),  ushort4)
-#define as_ushort8(x) __builtin_astype((x),  ushort8)
-#define as_ushort16(x) __builtin_astype((x), ushort16)
-
-#define as_int(x) __builtin_astype((x),   int)
-#define as_int2(x) __builtin_astype((x),  int2)
-#define as_int3(x) __builtin_astype((x),  int3)
-#define as_int4(x) __builtin_astype((x),  int4)
-#define as_int8(x) __builtin_astype((x),  int8)
-#define as_int16(x) __builtin_astype((x), int16)
-
-#define as_uint(x) __builtin_astype((x),   uint)
-#define as_uint2(x) __builtin_astype((x),  uint2)
-#define as_uint3(x) __builtin_astype((x),  uint3)
-#define as_uint4(x) __builtin_astype((x),  uint4)
-#define as_uint8(x) __builtin_astype((x),  uint8)
-#define as_uint16(x) __builtin_astype((x), uint16)
-
-#define as_long(x) __builtin_astype((x),   long)
-#define as_long2(x) __builtin_astype((x),  long2)
-#define as_long3(x) __builtin_astype((x),  long3)
-#define as_long4(x) __builtin_astype((x),  long4)
-#define as_long8(x) __builtin_astype((x),  long8)
-#define as_long16(x) __builtin_astype((x), long16)
-
-#define as_ulong(x) __builtin_astype((x),   ulong)
-#define as_ulong2(x) __builtin_astype((x),  ulong2)
-#define as_ulong3(x) __builtin_astype((x),  ulong3)
-#define as_ulong4(x) __builtin_astype((x),  ulong4)
-#define as_ulong8(x) __builtin_astype((x),  ulong8)
-#define as_ulong16(x) __builtin_astype((x), ulong16)
-
-#define as_float(x) __builtin_astype((x),   float)
-#define as_float2(x) __builtin_astype((x),  float2)
-#define as_float3(x) __builtin_astype((x),  float3)
-#define as_float4(x) __builtin_astype((x),  float4)
-#define as_float8(x) __builtin_astype((x),  float8)
-#define as_float16(x) __builtin_astype((x), float16)
-
-#ifdef cl_khr_fp64
-#define as_double(x) __builtin_astype((x),   double)
-#define as_double2(x) __builtin_astype((x),  double2)
-#define as_double3(x) __builtin_astype((x),  double3)
-#define as_double4(x) __builtin_astype((x),  double4)
-#define as_double8(x) __builtin_astype((x),  double8)
-#define as_double16(x) __builtin_astype((x), double16)
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-#define as_half(x) __builtin_astype((x),   half)
-#define as_half2(x) __builtin_astype((x),  half2)
-#define as_half3(x) __builtin_astype((x),  half3)
-#define as_half4(x) __builtin_astype((x),  half4)
-#define as_half8(x) __builtin_astype((x),  half8)
-#define as_half16(x) __builtin_astype((x), half16)
-#endif //cl_khr_fp16
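
Since as_<type> performs a pure bit reinterpretation (no value conversion), it is the idiomatic way to inspect floating-point bit patterns. A small sketch that extracts the biased exponent field of an IEEE 754 float (the kernel name is hypothetical):

__kernel void bits_demo(__global const float *in, __global uint *out) {
    size_t i = get_global_id(0);
    uint bits = as_uint(in[i]);    // raw 32-bit pattern of the float
    out[i] = (bits >> 23) & 0xFF;  // biased exponent field (bits 23..30)
}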
-
-// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
-
-#define __kernel_exec(X, typen) __kernel \
-	__attribute__((work_group_size_hint(X, 1, 1))) \
-	__attribute__((vec_type_hint(typen)))
-
-#define kernel_exec(X, typen) __kernel \
-	__attribute__((work_group_size_hint(X, 1, 1))) \
-	__attribute__((vec_type_hint(typen)))
-
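These macros merely bundle the two attributes: __kernel_exec(X, typen) expands to __kernel with a work_group_size_hint(X, 1, 1) and a vec_type_hint(typen) attached, both of which are hints to the compiler rather than hard constraints. A hypothetical use:

__kernel_exec(64, float4)
void scale(__global float4 *buf, float s) {
    buf[get_global_id(0)] *= s;  // float4 * scalar broadcasts s to each lane
}
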
-// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
-
-/**
- * Returns the number of dimensions in use. This is the
- * value given to the work_dim argument specified in
- * clEnqueueNDRangeKernel.
- * For clEnqueueTask, this returns 1.
- */
-uint __ovld __cnfn get_work_dim(void);
-
-/**
- * Returns the number of global work-items specified for
- * dimension identified by dimindx. This value is given by
- * the global_work_size argument to
- * clEnqueueNDRangeKernel. Valid values of dimindx
- * are 0 to get_work_dim() - 1. For other values of
- * dimindx, get_global_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_global_size(uint dimindx);
-
-/**
- * Returns the unique global work-item ID value for
- * dimension identified by dimindx. The global work-item
- * ID specifies the work-item ID based on the number of
- * global work-items specified to execute the kernel. Valid
- * values of dimindx are 0 to get_work_dim() - 1. For
- * other values of dimindx, get_global_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_id(uint dimindx);
-
-/**
- * Returns the number of local work-items specified in
- * dimension identified by dimindx. This value is given by
- * the local_work_size argument to
- * clEnqueueNDRangeKernel if local_work_size is not
- * NULL; otherwise the OpenCL implementation chooses
- * an appropriate local_work_size value which is returned
- * by this function. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_size() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_local_size(uint dimindx);
-
-/**
- * Returns the unique local work-item ID, i.e. the ID of a
- * work-item within a specific work-group, for the dimension
- * identified by dimindx. Valid values of dimindx are 0 to
- * get_work_dim() - 1. For other values of dimindx,
- * get_local_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_local_id(uint dimindx);
-
-/**
- * Returns the number of work-groups that will execute a
- * kernel for dimension identified by dimindx.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values of dimindx, get_num_groups() returns 1.
- * For clEnqueueTask, this always returns 1.
- */
-size_t __ovld __cnfn get_num_groups(uint dimindx);
-
-/**
- * get_group_id returns the work-group ID, which is a
- * number from 0 to get_num_groups(dimindx) - 1.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_group_id() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_group_id(uint dimindx);
-
-/**
- * get_global_offset returns the offset values specified in
- * the global_work_offset argument to
- * clEnqueueNDRangeKernel.
- * Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values, get_global_offset() returns 0.
- * For clEnqueueTask, this returns 0.
- */
-size_t __ovld __cnfn get_global_offset(uint dimindx);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld get_enqueued_local_size(uint dimindx);
-size_t __ovld get_global_linear_id(void);
-size_t __ovld get_local_linear_id(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
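
The work-item functions above compose naturally. As a sketch, flattening a two-dimensional NDRange into a row-major linear index by hand, which (for zero global offsets) matches what get_global_linear_id() returns on OpenCL 2.0 (the kernel name is hypothetical):

__kernel void index_demo(__global int *out) {
    size_t gx = get_global_id(0);
    size_t gy = get_global_id(1);
    size_t linear = gy * get_global_size(0) + gx;  // row-major flattening
    out[linear] = (int)get_group_id(0);            // which work-group wrote it
}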
-
-// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
-
-/**
- * Arc cosine function.
- */
-float __ovld __cnfn acos(float);
-float2 __ovld __cnfn acos(float2);
-float3 __ovld __cnfn acos(float3);
-float4 __ovld __cnfn acos(float4);
-float8 __ovld __cnfn acos(float8);
-float16 __ovld __cnfn acos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acos(double);
-double2 __ovld __cnfn acos(double2);
-double3 __ovld __cnfn acos(double3);
-double4 __ovld __cnfn acos(double4);
-double8 __ovld __cnfn acos(double8);
-double16 __ovld __cnfn acos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acos(half);
-half2 __ovld __cnfn acos(half2);
-half3 __ovld __cnfn acos(half3);
-half4 __ovld __cnfn acos(half4);
-half8 __ovld __cnfn acos(half8);
-half16 __ovld __cnfn acos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic cosine.
- */
-float __ovld __cnfn acosh(float);
-float2 __ovld __cnfn acosh(float2);
-float3 __ovld __cnfn acosh(float3);
-float4 __ovld __cnfn acosh(float4);
-float8 __ovld __cnfn acosh(float8);
-float16 __ovld __cnfn acosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acosh(double);
-double2 __ovld __cnfn acosh(double2);
-double3 __ovld __cnfn acosh(double3);
-double4 __ovld __cnfn acosh(double4);
-double8 __ovld __cnfn acosh(double8);
-double16 __ovld __cnfn acosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acosh(half);
-half2 __ovld __cnfn acosh(half2);
-half3 __ovld __cnfn acosh(half3);
-half4 __ovld __cnfn acosh(half4);
-half8 __ovld __cnfn acosh(half8);
-half16 __ovld __cnfn acosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute acos (x) / PI.
- */
-float __ovld __cnfn acospi(float x);
-float2 __ovld __cnfn acospi(float2 x);
-float3 __ovld __cnfn acospi(float3 x);
-float4 __ovld __cnfn acospi(float4 x);
-float8 __ovld __cnfn acospi(float8 x);
-float16 __ovld __cnfn acospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn acospi(double x);
-double2 __ovld __cnfn acospi(double2 x);
-double3 __ovld __cnfn acospi(double3 x);
-double4 __ovld __cnfn acospi(double4 x);
-double8 __ovld __cnfn acospi(double8 x);
-double16 __ovld __cnfn acospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn acospi(half x);
-half2 __ovld __cnfn acospi(half2 x);
-half3 __ovld __cnfn acospi(half3 x);
-half4 __ovld __cnfn acospi(half4 x);
-half8 __ovld __cnfn acospi(half8 x);
-half16 __ovld __cnfn acospi(half16 x);
-#endif //cl_khr_fp16
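
The *pi variants fold the division by PI into the operation itself, so acospi(x) corresponds to acos(x) / M_PI_F but as a single, typically better-rounded call. A sketch (the kernel name is hypothetical):

__kernel void acospi_demo(__global const float *x, __global float *out) {
    size_t i = get_global_id(0);
    out[i] = acospi(x[i]);  // result in [0, 1] rather than [0, PI]
}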
-
-/**
- * Arc sine function.
- */
-float __ovld __cnfn asin(float);
-float2 __ovld __cnfn asin(float2);
-float3 __ovld __cnfn asin(float3);
-float4 __ovld __cnfn asin(float4);
-float8 __ovld __cnfn asin(float8);
-float16 __ovld __cnfn asin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asin(double);
-double2 __ovld __cnfn asin(double2);
-double3 __ovld __cnfn asin(double3);
-double4 __ovld __cnfn asin(double4);
-double8 __ovld __cnfn asin(double8);
-double16 __ovld __cnfn asin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asin(half);
-half2 __ovld __cnfn asin(half2);
-half3 __ovld __cnfn asin(half3);
-half4 __ovld __cnfn asin(half4);
-half8 __ovld __cnfn asin(half8);
-half16 __ovld __cnfn asin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Inverse hyperbolic sine.
- */
-float __ovld __cnfn asinh(float);
-float2 __ovld __cnfn asinh(float2);
-float3 __ovld __cnfn asinh(float3);
-float4 __ovld __cnfn asinh(float4);
-float8 __ovld __cnfn asinh(float8);
-float16 __ovld __cnfn asinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinh(double);
-double2 __ovld __cnfn asinh(double2);
-double3 __ovld __cnfn asinh(double3);
-double4 __ovld __cnfn asinh(double4);
-double8 __ovld __cnfn asinh(double8);
-double16 __ovld __cnfn asinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinh(half);
-half2 __ovld __cnfn asinh(half2);
-half3 __ovld __cnfn asinh(half3);
-half4 __ovld __cnfn asinh(half4);
-half8 __ovld __cnfn asinh(half8);
-half16 __ovld __cnfn asinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute asin (x) / PI.
- */
-float __ovld __cnfn asinpi(float x);
-float2 __ovld __cnfn asinpi(float2 x);
-float3 __ovld __cnfn asinpi(float3 x);
-float4 __ovld __cnfn asinpi(float4 x);
-float8 __ovld __cnfn asinpi(float8 x);
-float16 __ovld __cnfn asinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn asinpi(double x);
-double2 __ovld __cnfn asinpi(double2 x);
-double3 __ovld __cnfn asinpi(double3 x);
-double4 __ovld __cnfn asinpi(double4 x);
-double8 __ovld __cnfn asinpi(double8 x);
-double16 __ovld __cnfn asinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn asinpi(half x);
-half2 __ovld __cnfn asinpi(half2 x);
-half3 __ovld __cnfn asinpi(half3 x);
-half4 __ovld __cnfn asinpi(half4 x);
-half8 __ovld __cnfn asinpi(half8 x);
-half16 __ovld __cnfn asinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent function.
- */
-float __ovld __cnfn atan(float y_over_x);
-float2 __ovld __cnfn atan(float2 y_over_x);
-float3 __ovld __cnfn atan(float3 y_over_x);
-float4 __ovld __cnfn atan(float4 y_over_x);
-float8 __ovld __cnfn atan(float8 y_over_x);
-float16 __ovld __cnfn atan(float16 y_over_x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan(double y_over_x);
-double2 __ovld __cnfn atan(double2 y_over_x);
-double3 __ovld __cnfn atan(double3 y_over_x);
-double4 __ovld __cnfn atan(double4 y_over_x);
-double8 __ovld __cnfn atan(double8 y_over_x);
-double16 __ovld __cnfn atan(double16 y_over_x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan(half y_over_x);
-half2 __ovld __cnfn atan(half2 y_over_x);
-half3 __ovld __cnfn atan(half3 y_over_x);
-half4 __ovld __cnfn atan(half4 y_over_x);
-half8 __ovld __cnfn atan(half8 y_over_x);
-half16 __ovld __cnfn atan(half16 y_over_x);
-#endif //cl_khr_fp16
-
-/**
- * Arc tangent of y / x.
- */
-float __ovld __cnfn atan2(float y, float x);
-float2 __ovld __cnfn atan2(float2 y, float2 x);
-float3 __ovld __cnfn atan2(float3 y, float3 x);
-float4 __ovld __cnfn atan2(float4 y, float4 x);
-float8 __ovld __cnfn atan2(float8 y, float8 x);
-float16 __ovld __cnfn atan2(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2(double y, double x);
-double2 __ovld __cnfn atan2(double2 y, double2 x);
-double3 __ovld __cnfn atan2(double3 y, double3 x);
-double4 __ovld __cnfn atan2(double4 y, double4 x);
-double8 __ovld __cnfn atan2(double8 y, double8 x);
-double16 __ovld __cnfn atan2(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2(half y, half x);
-half2 __ovld __cnfn atan2(half2 y, half2 x);
-half3 __ovld __cnfn atan2(half3 y, half3 x);
-half4 __ovld __cnfn atan2(half4 y, half4 x);
-half8 __ovld __cnfn atan2(half8 y, half8 x);
-half16 __ovld __cnfn atan2(half16 y, half16 x);
-#endif //cl_khr_fp16
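
Unlike atan(y / x), atan2(y, x) uses the signs of both arguments to resolve the quadrant, which makes it the right tool for Cartesian-to-polar conversion. A sketch (the kernel name is hypothetical):

__kernel void polar_demo(__global const float2 *p, __global float2 *out) {
    size_t i = get_global_id(0);
    float2 v = p[i];
    out[i] = (float2)(sqrt(v.x * v.x + v.y * v.y),  // radius
                      atan2(v.y, v.x));             // angle in [-pi, pi]
}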
-
-/**
- * Inverse hyperbolic tangent.
- */
-float __ovld __cnfn atanh(float);
-float2 __ovld __cnfn atanh(float2);
-float3 __ovld __cnfn atanh(float3);
-float4 __ovld __cnfn atanh(float4);
-float8 __ovld __cnfn atanh(float8);
-float16 __ovld __cnfn atanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanh(double);
-double2 __ovld __cnfn atanh(double2);
-double3 __ovld __cnfn atanh(double3);
-double4 __ovld __cnfn atanh(double4);
-double8 __ovld __cnfn atanh(double8);
-double16 __ovld __cnfn atanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanh(half);
-half2 __ovld __cnfn atanh(half2);
-half3 __ovld __cnfn atanh(half3);
-half4 __ovld __cnfn atanh(half4);
-half8 __ovld __cnfn atanh(half8);
-half16 __ovld __cnfn atanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan (x) / PI.
- */
-float __ovld __cnfn atanpi(float x);
-float2 __ovld __cnfn atanpi(float2 x);
-float3 __ovld __cnfn atanpi(float3 x);
-float4 __ovld __cnfn atanpi(float4 x);
-float8 __ovld __cnfn atanpi(float8 x);
-float16 __ovld __cnfn atanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atanpi(double x);
-double2 __ovld __cnfn atanpi(double2 x);
-double3 __ovld __cnfn atanpi(double3 x);
-double4 __ovld __cnfn atanpi(double4 x);
-double8 __ovld __cnfn atanpi(double8 x);
-double16 __ovld __cnfn atanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atanpi(half x);
-half2 __ovld __cnfn atanpi(half2 x);
-half3 __ovld __cnfn atanpi(half3 x);
-half4 __ovld __cnfn atanpi(half4 x);
-half8 __ovld __cnfn atanpi(half8 x);
-half16 __ovld __cnfn atanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute atan2 (y, x) / PI.
- */
-float __ovld __cnfn atan2pi(float y, float x);
-float2 __ovld __cnfn atan2pi(float2 y, float2 x);
-float3 __ovld __cnfn atan2pi(float3 y, float3 x);
-float4 __ovld __cnfn atan2pi(float4 y, float4 x);
-float8 __ovld __cnfn atan2pi(float8 y, float8 x);
-float16 __ovld __cnfn atan2pi(float16 y, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn atan2pi(double y, double x);
-double2 __ovld __cnfn atan2pi(double2 y, double2 x);
-double3 __ovld __cnfn atan2pi(double3 y, double3 x);
-double4 __ovld __cnfn atan2pi(double4 y, double4 x);
-double8 __ovld __cnfn atan2pi(double8 y, double8 x);
-double16 __ovld __cnfn atan2pi(double16 y, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn atan2pi(half y, half x);
-half2 __ovld __cnfn atan2pi(half2 y, half2 x);
-half3 __ovld __cnfn atan2pi(half3 y, half3 x);
-half4 __ovld __cnfn atan2pi(half4 y, half4 x);
-half8 __ovld __cnfn atan2pi(half8 y, half8 x);
-half16 __ovld __cnfn atan2pi(half16 y, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute cube-root.
- */
-float __ovld __cnfn cbrt(float);
-float2 __ovld __cnfn cbrt(float2);
-float3 __ovld __cnfn cbrt(float3);
-float4 __ovld __cnfn cbrt(float4);
-float8 __ovld __cnfn cbrt(float8);
-float16 __ovld __cnfn cbrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cbrt(double);
-double2 __ovld __cnfn cbrt(double2);
-double3 __ovld __cnfn cbrt(double3);
-double4 __ovld __cnfn cbrt(double4);
-double8 __ovld __cnfn cbrt(double8);
-double16 __ovld __cnfn cbrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cbrt(half);
-half2 __ovld __cnfn cbrt(half2);
-half3 __ovld __cnfn cbrt(half3);
-half4 __ovld __cnfn cbrt(half4);
-half8 __ovld __cnfn cbrt(half8);
-half16 __ovld __cnfn cbrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to positive
- * infinity rounding mode.
- */
-float __ovld __cnfn ceil(float);
-float2 __ovld __cnfn ceil(float2);
-float3 __ovld __cnfn ceil(float3);
-float4 __ovld __cnfn ceil(float4);
-float8 __ovld __cnfn ceil(float8);
-float16 __ovld __cnfn ceil(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ceil(double);
-double2 __ovld __cnfn ceil(double2);
-double3 __ovld __cnfn ceil(double3);
-double4 __ovld __cnfn ceil(double4);
-double8 __ovld __cnfn ceil(double8);
-double16 __ovld __cnfn ceil(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ceil(half);
-half2 __ovld __cnfn ceil(half2);
-half3 __ovld __cnfn ceil(half3);
-half4 __ovld __cnfn ceil(half4);
-half8 __ovld __cnfn ceil(half8);
-half16 __ovld __cnfn ceil(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns x with its sign changed to match the sign of y.
- */
-float __ovld __cnfn copysign(float x, float y);
-float2 __ovld __cnfn copysign(float2 x, float2 y);
-float3 __ovld __cnfn copysign(float3 x, float3 y);
-float4 __ovld __cnfn copysign(float4 x, float4 y);
-float8 __ovld __cnfn copysign(float8 x, float8 y);
-float16 __ovld __cnfn copysign(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn copysign(double x, double y);
-double2 __ovld __cnfn copysign(double2 x, double2 y);
-double3 __ovld __cnfn copysign(double3 x, double3 y);
-double4 __ovld __cnfn copysign(double4 x, double4 y);
-double8 __ovld __cnfn copysign(double8 x, double8 y);
-double16 __ovld __cnfn copysign(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn copysign(half x, half y);
-half2 __ovld __cnfn copysign(half2 x, half2 y);
-half3 __ovld __cnfn copysign(half3 x, half3 y);
-half4 __ovld __cnfn copysign(half4 x, half4 y);
-half8 __ovld __cnfn copysign(half8 x, half8 y);
-half16 __ovld __cnfn copysign(half16 x, half16 y);
-#endif //cl_khr_fp16
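
copysign is a branch-free way to transfer a sign bit, including the sign of negative zero. For example, producing +/-1.0f according to the sign of an input (the kernel name is hypothetical):

__kernel void sign_demo(__global const float *v, __global float *out) {
    size_t i = get_global_id(0);
    out[i] = copysign(1.0f, v[i]);  // +1.0f or -1.0f, matching v[i]'s sign bit
}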
-
-/**
- * Compute cosine.
- */
-float __ovld __cnfn cos(float);
-float2 __ovld __cnfn cos(float2);
-float3 __ovld __cnfn cos(float3);
-float4 __ovld __cnfn cos(float4);
-float8 __ovld __cnfn cos(float8);
-float16 __ovld __cnfn cos(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cos(double);
-double2 __ovld __cnfn cos(double2);
-double3 __ovld __cnfn cos(double3);
-double4 __ovld __cnfn cos(double4);
-double8 __ovld __cnfn cos(double8);
-double16 __ovld __cnfn cos(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cos(half);
-half2 __ovld __cnfn cos(half2);
-half3 __ovld __cnfn cos(half3);
-half4 __ovld __cnfn cos(half4);
-half8 __ovld __cnfn cos(half8);
-half16 __ovld __cnfn cos(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic cosine.
- */
-float __ovld __cnfn cosh(float);
-float2 __ovld __cnfn cosh(float2);
-float3 __ovld __cnfn cosh(float3);
-float4 __ovld __cnfn cosh(float4);
-float8 __ovld __cnfn cosh(float8);
-float16 __ovld __cnfn cosh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cosh(double);
-double2 __ovld __cnfn cosh(double2);
-double3 __ovld __cnfn cosh(double3);
-double4 __ovld __cnfn cosh(double4);
-double8 __ovld __cnfn cosh(double8);
-double16 __ovld __cnfn cosh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cosh(half);
-half2 __ovld __cnfn cosh(half2);
-half3 __ovld __cnfn cosh(half3);
-half4 __ovld __cnfn cosh(half4);
-half8 __ovld __cnfn cosh(half8);
-half16 __ovld __cnfn cosh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute cos (PI * x).
- */
-float __ovld __cnfn cospi(float x);
-float2 __ovld __cnfn cospi(float2 x);
-float3 __ovld __cnfn cospi(float3 x);
-float4 __ovld __cnfn cospi(float4 x);
-float8 __ovld __cnfn cospi(float8 x);
-float16 __ovld __cnfn cospi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn cospi(double x);
-double2 __ovld __cnfn cospi(double2 x);
-double3 __ovld __cnfn cospi(double3 x);
-double4 __ovld __cnfn cospi(double4 x);
-double8 __ovld __cnfn cospi(double8 x);
-double16 __ovld __cnfn cospi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn cospi(half x);
-half2 __ovld __cnfn cospi(half2 x);
-half3 __ovld __cnfn cospi(half3 x);
-half4 __ovld __cnfn cospi(half4 x);
-half8 __ovld __cnfn cospi(half8 x);
-half16 __ovld __cnfn cospi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Complementary error function.
- */
-float __ovld __cnfn erfc(float);
-float2 __ovld __cnfn erfc(float2);
-float3 __ovld __cnfn erfc(float3);
-float4 __ovld __cnfn erfc(float4);
-float8 __ovld __cnfn erfc(float8);
-float16 __ovld __cnfn erfc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erfc(double);
-double2 __ovld __cnfn erfc(double2);
-double3 __ovld __cnfn erfc(double3);
-double4 __ovld __cnfn erfc(double4);
-double8 __ovld __cnfn erfc(double8);
-double16 __ovld __cnfn erfc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erfc(half);
-half2 __ovld __cnfn erfc(half2);
-half3 __ovld __cnfn erfc(half3);
-half4 __ovld __cnfn erfc(half4);
-half8 __ovld __cnfn erfc(half8);
-half16 __ovld __cnfn erfc(half16);
-#endif //cl_khr_fp16
-
-/**
- * Error function, encountered when integrating the
- * normal distribution.
- */
-float __ovld __cnfn erf(float);
-float2 __ovld __cnfn erf(float2);
-float3 __ovld __cnfn erf(float3);
-float4 __ovld __cnfn erf(float4);
-float8 __ovld __cnfn erf(float8);
-float16 __ovld __cnfn erf(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn erf(double);
-double2 __ovld __cnfn erf(double2);
-double3 __ovld __cnfn erf(double3);
-double4 __ovld __cnfn erf(double4);
-double8 __ovld __cnfn erf(double8);
-double16 __ovld __cnfn erf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn erf(half);
-half2 __ovld __cnfn erf(half2);
-half3 __ovld __cnfn erf(half3);
-half4 __ovld __cnfn erf(half4);
-half8 __ovld __cnfn erf(half8);
-half16 __ovld __cnfn erf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute the base e exponential function of x.
- */
-float __ovld __cnfn exp(float x);
-float2 __ovld __cnfn exp(float2 x);
-float3 __ovld __cnfn exp(float3 x);
-float4 __ovld __cnfn exp(float4 x);
-float8 __ovld __cnfn exp(float8 x);
-float16 __ovld __cnfn exp(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp(double x);
-double2 __ovld __cnfn exp(double2 x);
-double3 __ovld __cnfn exp(double3 x);
-double4 __ovld __cnfn exp(double4 x);
-double8 __ovld __cnfn exp(double8 x);
-double16 __ovld __cnfn exp(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp(half x);
-half2 __ovld __cnfn exp(half2 x);
-half3 __ovld __cnfn exp(half3 x);
-half4 __ovld __cnfn exp(half4 x);
-half8 __ovld __cnfn exp(half8 x);
-half16 __ovld __cnfn exp(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 2 function.
- */
-float __ovld __cnfn exp2(float);
-float2 __ovld __cnfn exp2(float2);
-float3 __ovld __cnfn exp2(float3);
-float4 __ovld __cnfn exp2(float4);
-float8 __ovld __cnfn exp2(float8);
-float16 __ovld __cnfn exp2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp2(double);
-double2 __ovld __cnfn exp2(double2);
-double3 __ovld __cnfn exp2(double3);
-double4 __ovld __cnfn exp2(double4);
-double8 __ovld __cnfn exp2(double8);
-double16 __ovld __cnfn exp2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp2(half);
-half2 __ovld __cnfn exp2(half2);
-half3 __ovld __cnfn exp2(half3);
-half4 __ovld __cnfn exp2(half4);
-half8 __ovld __cnfn exp2(half8);
-half16 __ovld __cnfn exp2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Exponential base 10 function.
- */
-float __ovld __cnfn exp10(float);
-float2 __ovld __cnfn exp10(float2);
-float3 __ovld __cnfn exp10(float3);
-float4 __ovld __cnfn exp10(float4);
-float8 __ovld __cnfn exp10(float8);
-float16 __ovld __cnfn exp10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn exp10(double);
-double2 __ovld __cnfn exp10(double2);
-double3 __ovld __cnfn exp10(double3);
-double4 __ovld __cnfn exp10(double4);
-double8 __ovld __cnfn exp10(double8);
-double16 __ovld __cnfn exp10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn exp10(half);
-half2 __ovld __cnfn exp10(half2);
-half3 __ovld __cnfn exp10(half3);
-half4 __ovld __cnfn exp10(half4);
-half8 __ovld __cnfn exp10(half8);
-half16 __ovld __cnfn exp10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute e^x - 1.0.
- */
-float __ovld __cnfn expm1(float x);
-float2 __ovld __cnfn expm1(float2 x);
-float3 __ovld __cnfn expm1(float3 x);
-float4 __ovld __cnfn expm1(float4 x);
-float8 __ovld __cnfn expm1(float8 x);
-float16 __ovld __cnfn expm1(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn expm1(double x);
-double2 __ovld __cnfn expm1(double2 x);
-double3 __ovld __cnfn expm1(double3 x);
-double4 __ovld __cnfn expm1(double4 x);
-double8 __ovld __cnfn expm1(double8 x);
-double16 __ovld __cnfn expm1(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn expm1(half x);
-half2 __ovld __cnfn expm1(half2 x);
-half3 __ovld __cnfn expm1(half3 x);
-half4 __ovld __cnfn expm1(half4 x);
-half8 __ovld __cnfn expm1(half8 x);
-half16 __ovld __cnfn expm1(half16 x);
-#endif //cl_khr_fp16
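
expm1 exists because exp(x) - 1.0f cancels catastrophically for small |x| (the leading 1 swallows the significant digits), whereas expm1(x) stays close to x there. A sketch (the kernel name is hypothetical):

__kernel void expm1_demo(__global const float *x, __global float *out) {
    size_t i = get_global_id(0);
    out[i] = expm1(x[i]);  // accurate for |x| << 1, unlike exp(x[i]) - 1.0f
}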
-
-/**
- * Compute absolute value of a floating-point number.
- */
-float __ovld __cnfn fabs(float);
-float2 __ovld __cnfn fabs(float2);
-float3 __ovld __cnfn fabs(float3);
-float4 __ovld __cnfn fabs(float4);
-float8 __ovld __cnfn fabs(float8);
-float16 __ovld __cnfn fabs(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fabs(double);
-double2 __ovld __cnfn fabs(double2);
-double3 __ovld __cnfn fabs(double3);
-double4 __ovld __cnfn fabs(double4);
-double8 __ovld __cnfn fabs(double8);
-double16 __ovld __cnfn fabs(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fabs(half);
-half2 __ovld __cnfn fabs(half2);
-half3 __ovld __cnfn fabs(half3);
-half4 __ovld __cnfn fabs(half4);
-half8 __ovld __cnfn fabs(half8);
-half16 __ovld __cnfn fabs(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns x - y if x > y, and +0 if x <= y.
- */
-float __ovld __cnfn fdim(float x, float y);
-float2 __ovld __cnfn fdim(float2 x, float2 y);
-float3 __ovld __cnfn fdim(float3 x, float3 y);
-float4 __ovld __cnfn fdim(float4 x, float4 y);
-float8 __ovld __cnfn fdim(float8 x, float8 y);
-float16 __ovld __cnfn fdim(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fdim(double x, double y);
-double2 __ovld __cnfn fdim(double2 x, double2 y);
-double3 __ovld __cnfn fdim(double3 x, double3 y);
-double4 __ovld __cnfn fdim(double4 x, double4 y);
-double8 __ovld __cnfn fdim(double8 x, double8 y);
-double16 __ovld __cnfn fdim(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fdim(half x, half y);
-half2 __ovld __cnfn fdim(half2 x, half2 y);
-half3 __ovld __cnfn fdim(half3 x, half3 y);
-half4 __ovld __cnfn fdim(half4 x, half4 y);
-half8 __ovld __cnfn fdim(half8 x, half8 y);
-half16 __ovld __cnfn fdim(half16 x, half16 y);
-#endif //cl_khr_fp16
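-
-/* Illustrative example (editorial addition, not part of the upstream
- * header): fdim clamps the difference at +0, giving a branch-free
- * "positive difference".
- *
- *   __kernel void fdim_demo(__global float *out) {
- *     out[0] = fdim(5.0f, 3.0f);  // 2.0:  x - y, since x > y
- *     out[1] = fdim(3.0f, 5.0f);  // +0.0: clamped, since x <= y
- *   }
- */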
-
-/**
- * Round to integral value using the round to negative
- * infinity rounding mode.
- */
-float __ovld __cnfn floor(float);
-float2 __ovld __cnfn floor(float2);
-float3 __ovld __cnfn floor(float3);
-float4 __ovld __cnfn floor(float4);
-float8 __ovld __cnfn floor(float8);
-float16 __ovld __cnfn floor(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn floor(double);
-double2 __ovld __cnfn floor(double2);
-double3 __ovld __cnfn floor(double3);
-double4 __ovld __cnfn floor(double4);
-double8 __ovld __cnfn floor(double8);
-double16 __ovld __cnfn floor(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn floor(half);
-half2 __ovld __cnfn floor(half2);
-half3 __ovld __cnfn floor(half3);
-half4 __ovld __cnfn floor(half4);
-half8 __ovld __cnfn floor(half8);
-half16 __ovld __cnfn floor(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns the correctly rounded floating-point
- * representation of the sum of c with the infinitely
- * precise product of a and b. Rounding of
- * intermediate products shall not occur. Edge case
- * behavior is per the IEEE 754-2008 standard.
- */
-float __ovld __cnfn fma(float a, float b, float c);
-float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fma(double a, double b, double c);
-double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fma(half a, half b, half c);
-half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
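-
-/* Illustrative example (editorial addition): because fma rounds a*b + c
- * exactly once, it can recover the rounding error of a product, the
- * classic two-product trick.
- *
- *   __kernel void fma_demo(__global float *out) {
- *     float a = 0x1.000002p0f;   // 1 + 2^-23, the float just above 1.0
- *     float p = a * a;           // rounded product: 1 + 2^-22
- *     out[0] = fma(a, a, -p);    // exact residue a*a - p = 2^-46
- *   }
- */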
-
-/**
- * Returns y if x < y, otherwise it returns x. If one
- * argument is a NaN, fmax() returns the other
- * argument. If both arguments are NaNs, fmax()
- * returns a NaN.
- */
-float __ovld __cnfn fmax(float x, float y);
-float2 __ovld __cnfn fmax(float2 x, float2 y);
-float3 __ovld __cnfn fmax(float3 x, float3 y);
-float4 __ovld __cnfn fmax(float4 x, float4 y);
-float8 __ovld __cnfn fmax(float8 x, float8 y);
-float16 __ovld __cnfn fmax(float16 x, float16 y);
-float2 __ovld __cnfn fmax(float2 x, float y);
-float3 __ovld __cnfn fmax(float3 x, float y);
-float4 __ovld __cnfn fmax(float4 x, float y);
-float8 __ovld __cnfn fmax(float8 x, float y);
-float16 __ovld __cnfn fmax(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmax(double x, double y);
-double2 __ovld __cnfn fmax(double2 x, double2 y);
-double3 __ovld __cnfn fmax(double3 x, double3 y);
-double4 __ovld __cnfn fmax(double4 x, double4 y);
-double8 __ovld __cnfn fmax(double8 x, double8 y);
-double16 __ovld __cnfn fmax(double16 x, double16 y);
-double2 __ovld __cnfn fmax(double2 x, double y);
-double3 __ovld __cnfn fmax(double3 x, double y);
-double4 __ovld __cnfn fmax(double4 x, double y);
-double8 __ovld __cnfn fmax(double8 x, double y);
-double16 __ovld __cnfn fmax(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmax(half x, half y);
-half2 __ovld __cnfn fmax(half2 x, half2 y);
-half3 __ovld __cnfn fmax(half3 x, half3 y);
-half4 __ovld __cnfn fmax(half4 x, half4 y);
-half8 __ovld __cnfn fmax(half8 x, half8 y);
-half16 __ovld __cnfn fmax(half16 x, half16 y);
-half2 __ovld __cnfn fmax(half2 x, half y);
-half3 __ovld __cnfn fmax(half3 x, half y);
-half4 __ovld __cnfn fmax(half4 x, half y);
-half8 __ovld __cnfn fmax(half8 x, half y);
-half16 __ovld __cnfn fmax(half16 x, half y);
-#endif //cl_khr_fp16
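-
-/* Illustrative example (editorial addition): because a single NaN operand
- * is ignored, an fmax/fmin chain makes a NaN-suppressing clamp; the
- * vector-scalar overloads broadcast the scalar bound.
- *
- *   __kernel void clamp01(__global float4 *v) {
- *     *v = fmin(fmax(*v, 0.0f), 1.0f);  // NaN lanes come out as 0.0f
- *   }
- */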
-
-/**
- * Returns y if y < x, otherwise it returns x. If one
- * argument is a NaN, fmin() returns the other
- * argument. If both arguments are NaNs, fmin()
- * returns a NaN.
- */
-float __ovld __cnfn fmin(float x, float y);
-float2 __ovld __cnfn fmin(float2 x, float2 y);
-float3 __ovld __cnfn fmin(float3 x, float3 y);
-float4 __ovld __cnfn fmin(float4 x, float4 y);
-float8 __ovld __cnfn fmin(float8 x, float8 y);
-float16 __ovld __cnfn fmin(float16 x, float16 y);
-float2 __ovld __cnfn fmin(float2 x, float y);
-float3 __ovld __cnfn fmin(float3 x, float y);
-float4 __ovld __cnfn fmin(float4 x, float y);
-float8 __ovld __cnfn fmin(float8 x, float y);
-float16 __ovld __cnfn fmin(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmin(double x, double y);
-double2 __ovld __cnfn fmin(double2 x, double2 y);
-double3 __ovld __cnfn fmin(double3 x, double3 y);
-double4 __ovld __cnfn fmin(double4 x, double4 y);
-double8 __ovld __cnfn fmin(double8 x, double8 y);
-double16 __ovld __cnfn fmin(double16 x, double16 y);
-double2 __ovld __cnfn fmin(double2 x, double y);
-double3 __ovld __cnfn fmin(double3 x, double y);
-double4 __ovld __cnfn fmin(double4 x, double y);
-double8 __ovld __cnfn fmin(double8 x, double y);
-double16 __ovld __cnfn fmin(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmin(half x, half y);
-half2 __ovld __cnfn fmin(half2 x, half2 y);
-half3 __ovld __cnfn fmin(half3 x, half3 y);
-half4 __ovld __cnfn fmin(half4 x, half4 y);
-half8 __ovld __cnfn fmin(half8 x, half8 y);
-half16 __ovld __cnfn fmin(half16 x, half16 y);
-half2 __ovld __cnfn fmin(half2 x, half y);
-half3 __ovld __cnfn fmin(half3 x, half y);
-half4 __ovld __cnfn fmin(half4 x, half y);
-half8 __ovld __cnfn fmin(half8 x, half y);
-half16 __ovld __cnfn fmin(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Modulus. Returns x - y * trunc (x/y).
- */
-float __ovld __cnfn fmod(float x, float y);
-float2 __ovld __cnfn fmod(float2 x, float2 y);
-float3 __ovld __cnfn fmod(float3 x, float3 y);
-float4 __ovld __cnfn fmod(float4 x, float4 y);
-float8 __ovld __cnfn fmod(float8 x, float8 y);
-float16 __ovld __cnfn fmod(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn fmod(double x, double y);
-double2 __ovld __cnfn fmod(double2 x, double2 y);
-double3 __ovld __cnfn fmod(double3 x, double3 y);
-double4 __ovld __cnfn fmod(double4 x, double4 y);
-double8 __ovld __cnfn fmod(double8 x, double8 y);
-double16 __ovld __cnfn fmod(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn fmod(half x, half y);
-half2 __ovld __cnfn fmod(half2 x, half2 y);
-half3 __ovld __cnfn fmod(half3 x, half3 y);
-half4 __ovld __cnfn fmod(half4 x, half4 y);
-half8 __ovld __cnfn fmod(half8 x, half8 y);
-half16 __ovld __cnfn fmod(half16 x, half16 y);
-#endif //cl_khr_fp16
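-
-/* Illustrative example (editorial addition): fmod truncates x/y toward
- * zero, so the result keeps the sign of x.
- *
- *   __kernel void fmod_demo(__global float *out) {
- *     out[0] = fmod(5.5f, 2.0f);   //  1.5: 5.5 - 2*trunc(2.75)
- *     out[1] = fmod(-7.0f, 3.0f);  // -1.0: -7 - 3*trunc(-7/3)
- *   }
- */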
-
-/**
- * Returns fmin(x - floor(x), 0x1.fffffep-1f).
- * floor(x) is returned in iptr.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld fract(float x, float *iptr);
-float2 __ovld fract(float2 x, float2 *iptr);
-float3 __ovld fract(float3 x, float3 *iptr);
-float4 __ovld fract(float4 x, float4 *iptr);
-float8 __ovld fract(float8 x, float8 *iptr);
-float16 __ovld fract(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, double *iptr);
-double2 __ovld fract(double2 x, double2 *iptr);
-double3 __ovld fract(double3 x, double3 *iptr);
-double4 __ovld fract(double4 x, double4 *iptr);
-double8 __ovld fract(double8 x, double8 *iptr);
-double16 __ovld fract(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, half *iptr);
-half2 __ovld fract(half2 x, half2 *iptr);
-half3 __ovld fract(half3 x, half3 *iptr);
-half4 __ovld fract(half4 x, half4 *iptr);
-half8 __ovld fract(half8 x, half8 *iptr);
-half16 __ovld fract(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld fract(float x, __global float *iptr);
-float2 __ovld fract(float2 x, __global float2 *iptr);
-float3 __ovld fract(float3 x, __global float3 *iptr);
-float4 __ovld fract(float4 x, __global float4 *iptr);
-float8 __ovld fract(float8 x, __global float8 *iptr);
-float16 __ovld fract(float16 x, __global float16 *iptr);
-float __ovld fract(float x, __local float *iptr);
-float2 __ovld fract(float2 x, __local float2 *iptr);
-float3 __ovld fract(float3 x, __local float3 *iptr);
-float4 __ovld fract(float4 x, __local float4 *iptr);
-float8 __ovld fract(float8 x, __local float8 *iptr);
-float16 __ovld fract(float16 x, __local float16 *iptr);
-float __ovld fract(float x, __private float *iptr);
-float2 __ovld fract(float2 x, __private float2 *iptr);
-float3 __ovld fract(float3 x, __private float3 *iptr);
-float4 __ovld fract(float4 x, __private float4 *iptr);
-float8 __ovld fract(float8 x, __private float8 *iptr);
-float16 __ovld fract(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld fract(double x, __global double *iptr);
-double2 __ovld fract(double2 x, __global double2 *iptr);
-double3 __ovld fract(double3 x, __global double3 *iptr);
-double4 __ovld fract(double4 x, __global double4 *iptr);
-double8 __ovld fract(double8 x, __global double8 *iptr);
-double16 __ovld fract(double16 x, __global double16 *iptr);
-double __ovld fract(double x, __local double *iptr);
-double2 __ovld fract(double2 x, __local double2 *iptr);
-double3 __ovld fract(double3 x, __local double3 *iptr);
-double4 __ovld fract(double4 x, __local double4 *iptr);
-double8 __ovld fract(double8 x, __local double8 *iptr);
-double16 __ovld fract(double16 x, __local double16 *iptr);
-double __ovld fract(double x, __private double *iptr);
-double2 __ovld fract(double2 x, __private double2 *iptr);
-double3 __ovld fract(double3 x, __private double3 *iptr);
-double4 __ovld fract(double4 x, __private double4 *iptr);
-double8 __ovld fract(double8 x, __private double8 *iptr);
-double16 __ovld fract(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld fract(half x, __global half *iptr);
-half2 __ovld fract(half2 x, __global half2 *iptr);
-half3 __ovld fract(half3 x, __global half3 *iptr);
-half4 __ovld fract(half4 x, __global half4 *iptr);
-half8 __ovld fract(half8 x, __global half8 *iptr);
-half16 __ovld fract(half16 x, __global half16 *iptr);
-half __ovld fract(half x, __local half *iptr);
-half2 __ovld fract(half2 x, __local half2 *iptr);
-half3 __ovld fract(half3 x, __local half3 *iptr);
-half4 __ovld fract(half4 x, __local half4 *iptr);
-half8 __ovld fract(half8 x, __local half8 *iptr);
-half16 __ovld fract(half16 x, __local half16 *iptr);
-half __ovld fract(half x, __private half *iptr);
-half2 __ovld fract(half2 x, __private half2 *iptr);
-half3 __ovld fract(half3 x, __private half3 *iptr);
-half4 __ovld fract(half4 x, __private half4 *iptr);
-half8 __ovld fract(half8 x, __private half8 *iptr);
-half16 __ovld fract(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
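-
-/* Illustrative example (editorial addition): fract measures the
- * fractional part from floor(x), so it is non-negative even for negative
- * x. Passing the address of a private variable matches both the OpenCL
- * 2.0 generic overload and the earlier __private overload.
- *
- *   __kernel void fract_demo(__global float *out) {
- *     float ip;
- *     out[0] = fract(-1.25f, &ip);  // 0.75: -1.25 - floor(-1.25)
- *     out[1] = ip;                  // -2.0: floor(-1.25)
- *   }
- */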
-
-/**
- * Extract mantissa and exponent from x. For each
- * component, the returned mantissa is a float with
- * magnitude in the interval [1/2, 1) or 0. Each
- * component of x equals the returned mantissa * 2^exp.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld frexp(float x, int *exp);
-float2 __ovld frexp(float2 x, int2 *exp);
-float3 __ovld frexp(float3 x, int3 *exp);
-float4 __ovld frexp(float4 x, int4 *exp);
-float8 __ovld frexp(float8 x, int8 *exp);
-float16 __ovld frexp(float16 x, int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, int *exp);
-double2 __ovld frexp(double2 x, int2 *exp);
-double3 __ovld frexp(double3 x, int3 *exp);
-double4 __ovld frexp(double4 x, int4 *exp);
-double8 __ovld frexp(double8 x, int8 *exp);
-double16 __ovld frexp(double16 x, int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, int *exp);
-half2 __ovld frexp(half2 x, int2 *exp);
-half3 __ovld frexp(half3 x, int3 *exp);
-half4 __ovld frexp(half4 x, int4 *exp);
-half8 __ovld frexp(half8 x, int8 *exp);
-half16 __ovld frexp(half16 x, int16 *exp);
-#endif //cl_khr_fp16
-#else
-float __ovld frexp(float x, __global int *exp);
-float2 __ovld frexp(float2 x, __global int2 *exp);
-float3 __ovld frexp(float3 x, __global int3 *exp);
-float4 __ovld frexp(float4 x, __global int4 *exp);
-float8 __ovld frexp(float8 x, __global int8 *exp);
-float16 __ovld frexp(float16 x, __global int16 *exp);
-float __ovld frexp(float x, __local int *exp);
-float2 __ovld frexp(float2 x, __local int2 *exp);
-float3 __ovld frexp(float3 x, __local int3 *exp);
-float4 __ovld frexp(float4 x, __local int4 *exp);
-float8 __ovld frexp(float8 x, __local int8 *exp);
-float16 __ovld frexp(float16 x, __local int16 *exp);
-float __ovld frexp(float x, __private int *exp);
-float2 __ovld frexp(float2 x, __private int2 *exp);
-float3 __ovld frexp(float3 x, __private int3 *exp);
-float4 __ovld frexp(float4 x, __private int4 *exp);
-float8 __ovld frexp(float8 x, __private int8 *exp);
-float16 __ovld frexp(float16 x, __private int16 *exp);
-#ifdef cl_khr_fp64
-double __ovld frexp(double x, __global int *exp);
-double2 __ovld frexp(double2 x, __global int2 *exp);
-double3 __ovld frexp(double3 x, __global int3 *exp);
-double4 __ovld frexp(double4 x, __global int4 *exp);
-double8 __ovld frexp(double8 x, __global int8 *exp);
-double16 __ovld frexp(double16 x, __global int16 *exp);
-double __ovld frexp(double x, __local int *exp);
-double2 __ovld frexp(double2 x, __local int2 *exp);
-double3 __ovld frexp(double3 x, __local int3 *exp);
-double4 __ovld frexp(double4 x, __local int4 *exp);
-double8 __ovld frexp(double8 x, __local int8 *exp);
-double16 __ovld frexp(double16 x, __local int16 *exp);
-double __ovld frexp(double x, __private int *exp);
-double2 __ovld frexp(double2 x, __private int2 *exp);
-double3 __ovld frexp(double3 x, __private int3 *exp);
-double4 __ovld frexp(double4 x, __private int4 *exp);
-double8 __ovld frexp(double8 x, __private int8 *exp);
-double16 __ovld frexp(double16 x, __private int16 *exp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld frexp(half x, __global int *exp);
-half2 __ovld frexp(half2 x, __global int2 *exp);
-half3 __ovld frexp(half3 x, __global int3 *exp);
-half4 __ovld frexp(half4 x, __global int4 *exp);
-half8 __ovld frexp(half8 x, __global int8 *exp);
-half16 __ovld frexp(half16 x, __global int16 *exp);
-half __ovld frexp(half x, __local int *exp);
-half2 __ovld frexp(half2 x, __local int2 *exp);
-half3 __ovld frexp(half3 x, __local int3 *exp);
-half4 __ovld frexp(half4 x, __local int4 *exp);
-half8 __ovld frexp(half8 x, __local int8 *exp);
-half16 __ovld frexp(half16 x, __local int16 *exp);
-half __ovld frexp(half x, __private int *exp);
-half2 __ovld frexp(half2 x, __private int2 *exp);
-half3 __ovld frexp(half3 x, __private int3 *exp);
-half4 __ovld frexp(half4 x, __private int4 *exp);
-half8 __ovld frexp(half8 x, __private int8 *exp);
-half16 __ovld frexp(half16 x, __private int16 *exp);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
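-
-/* Illustrative example (editorial addition): frexp splits a value into a
- * normalized mantissa and a power-of-two exponent.
- *
- *   __kernel void frexp_demo(__global float *m, __global int *e) {
- *     int n;
- *     m[0] = frexp(24.0f, &n);  // 0.75, in [1/2, 1)
- *     e[0] = n;                 // 5, since 24 = 0.75 * 2^5
- *   }
- */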
-
-/**
- * Compute the value of the square root of x^2 + y^2
- * without undue overflow or underflow.
- */
-float __ovld __cnfn hypot(float x, float y);
-float2 __ovld __cnfn hypot(float2 x, float2 y);
-float3 __ovld __cnfn hypot(float3 x, float3 y);
-float4 __ovld __cnfn hypot(float4 x, float4 y);
-float8 __ovld __cnfn hypot(float8 x, float8 y);
-float16 __ovld __cnfn hypot(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn hypot(double x, double y);
-double2 __ovld __cnfn hypot(double2 x, double2 y);
-double3 __ovld __cnfn hypot(double3 x, double3 y);
-double4 __ovld __cnfn hypot(double4 x, double4 y);
-double8 __ovld __cnfn hypot(double8 x, double8 y);
-double16 __ovld __cnfn hypot(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn hypot(half x, half y);
-half2 __ovld __cnfn hypot(half2 x, half2 y);
-half3 __ovld __cnfn hypot(half3 x, half3 y);
-half4 __ovld __cnfn hypot(half4 x, half4 y);
-half8 __ovld __cnfn hypot(half8 x, half8 y);
-half16 __ovld __cnfn hypot(half16 x, half16 y);
-#endif //cl_khr_fp16
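-
-/* Illustrative example (editorial addition): hypot avoids the overflow
- * that squaring the operands directly would cause for large inputs.
- *
- *   __kernel void hypot_demo(__global float *out) {
- *     out[0] = hypot(3.0e19f, 4.0e19f);                       // 5.0e19
- *     out[1] = sqrt(3.0e19f * 3.0e19f + 4.0e19f * 4.0e19f);   // inf
- *   }
- */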
-
-/**
- * Return the exponent as an integer value.
- */
-int __ovld __cnfn ilogb(float x);
-int2 __ovld __cnfn ilogb(float2 x);
-int3 __ovld __cnfn ilogb(float3 x);
-int4 __ovld __cnfn ilogb(float4 x);
-int8 __ovld __cnfn ilogb(float8 x);
-int16 __ovld __cnfn ilogb(float16 x);
-#ifdef cl_khr_fp64
-int __ovld __cnfn ilogb(double x);
-int2 __ovld __cnfn ilogb(double2 x);
-int3 __ovld __cnfn ilogb(double3 x);
-int4 __ovld __cnfn ilogb(double4 x);
-int8 __ovld __cnfn ilogb(double8 x);
-int16 __ovld __cnfn ilogb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn ilogb(half x);
-int2 __ovld __cnfn ilogb(half2 x);
-int3 __ovld __cnfn ilogb(half3 x);
-int4 __ovld __cnfn ilogb(half4 x);
-int8 __ovld __cnfn ilogb(half8 x);
-int16 __ovld __cnfn ilogb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Multiply x by 2 to the power n.
- */
-float __ovld __cnfn ldexp(float x, int n);
-float2 __ovld __cnfn ldexp(float2 x, int2 n);
-float3 __ovld __cnfn ldexp(float3 x, int3 n);
-float4 __ovld __cnfn ldexp(float4 x, int4 n);
-float8 __ovld __cnfn ldexp(float8 x, int8 n);
-float16 __ovld __cnfn ldexp(float16 x, int16 n);
-float2 __ovld __cnfn ldexp(float2 x, int n);
-float3 __ovld __cnfn ldexp(float3 x, int n);
-float4 __ovld __cnfn ldexp(float4 x, int n);
-float8 __ovld __cnfn ldexp(float8 x, int n);
-float16 __ovld __cnfn ldexp(float16 x, int n);
-#ifdef cl_khr_fp64
-double __ovld __cnfn ldexp(double x, int n);
-double2 __ovld __cnfn ldexp(double2 x, int2 n);
-double3 __ovld __cnfn ldexp(double3 x, int3 n);
-double4 __ovld __cnfn ldexp(double4 x, int4 n);
-double8 __ovld __cnfn ldexp(double8 x, int8 n);
-double16 __ovld __cnfn ldexp(double16 x, int16 n);
-double2 __ovld __cnfn ldexp(double2 x, int n);
-double3 __ovld __cnfn ldexp(double3 x, int n);
-double4 __ovld __cnfn ldexp(double4 x, int n);
-double8 __ovld __cnfn ldexp(double8 x, int n);
-double16 __ovld __cnfn ldexp(double16 x, int n);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn ldexp(half x, int n);
-half2 __ovld __cnfn ldexp(half2 x, int2 n);
-half3 __ovld __cnfn ldexp(half3 x, int3 n);
-half4 __ovld __cnfn ldexp(half4 x, int4 n);
-half8 __ovld __cnfn ldexp(half8 x, int8 n);
-half16 __ovld __cnfn ldexp(half16 x, int16 n);
-half2 __ovld __cnfn ldexp(half2 x, int n);
-half3 __ovld __cnfn ldexp(half3 x, int n);
-half4 __ovld __cnfn ldexp(half4 x, int n);
-half8 __ovld __cnfn ldexp(half8 x, int n);
-half16 __ovld __cnfn ldexp(half16 x, int n);
-#endif //cl_khr_fp16
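-
-/* Illustrative example (editorial addition): ldexp is the inverse of
- * frexp, scaling by an exact power of two with no rounding error for
- * in-range results.
- *
- *   __kernel void ldexp_demo(__global float *out) {
- *     out[0] = ldexp(0.75f, 5);  // 0.75 * 2^5 = 24.0
- *   }
- */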
-
-/**
- * Log gamma function. Returns the natural
- * logarithm of the absolute value of the gamma
- * function. The sign of the gamma function is
- * returned in the signp argument of lgamma_r.
- */
-float __ovld __cnfn lgamma(float x);
-float2 __ovld __cnfn lgamma(float2 x);
-float3 __ovld __cnfn lgamma(float3 x);
-float4 __ovld __cnfn lgamma(float4 x);
-float8 __ovld __cnfn lgamma(float8 x);
-float16 __ovld __cnfn lgamma(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn lgamma(double x);
-double2 __ovld __cnfn lgamma(double2 x);
-double3 __ovld __cnfn lgamma(double3 x);
-double4 __ovld __cnfn lgamma(double4 x);
-double8 __ovld __cnfn lgamma(double8 x);
-double16 __ovld __cnfn lgamma(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn lgamma(half x);
-half2 __ovld __cnfn lgamma(half2 x);
-half3 __ovld __cnfn lgamma(half3 x);
-half4 __ovld __cnfn lgamma(half4 x);
-half8 __ovld __cnfn lgamma(half8 x);
-half16 __ovld __cnfn lgamma(half16 x);
-#endif //cl_khr_fp16
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld lgamma_r(float x, int *signp);
-float2 __ovld lgamma_r(float2 x, int2 *signp);
-float3 __ovld lgamma_r(float3 x, int3 *signp);
-float4 __ovld lgamma_r(float4 x, int4 *signp);
-float8 __ovld lgamma_r(float8 x, int8 *signp);
-float16 __ovld lgamma_r(float16 x, int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, int *signp);
-double2 __ovld lgamma_r(double2 x, int2 *signp);
-double3 __ovld lgamma_r(double3 x, int3 *signp);
-double4 __ovld lgamma_r(double4 x, int4 *signp);
-double8 __ovld lgamma_r(double8 x, int8 *signp);
-double16 __ovld lgamma_r(double16 x, int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, int *signp);
-half2 __ovld lgamma_r(half2 x, int2 *signp);
-half3 __ovld lgamma_r(half3 x, int3 *signp);
-half4 __ovld lgamma_r(half4 x, int4 *signp);
-half8 __ovld lgamma_r(half8 x, int8 *signp);
-half16 __ovld lgamma_r(half16 x, int16 *signp);
-#endif //cl_khr_fp16
-#else
-float __ovld lgamma_r(float x, __global int *signp);
-float2 __ovld lgamma_r(float2 x, __global int2 *signp);
-float3 __ovld lgamma_r(float3 x, __global int3 *signp);
-float4 __ovld lgamma_r(float4 x, __global int4 *signp);
-float8 __ovld lgamma_r(float8 x, __global int8 *signp);
-float16 __ovld lgamma_r(float16 x, __global int16 *signp);
-float __ovld lgamma_r(float x, __local int *signp);
-float2 __ovld lgamma_r(float2 x, __local int2 *signp);
-float3 __ovld lgamma_r(float3 x, __local int3 *signp);
-float4 __ovld lgamma_r(float4 x, __local int4 *signp);
-float8 __ovld lgamma_r(float8 x, __local int8 *signp);
-float16 __ovld lgamma_r(float16 x, __local int16 *signp);
-float __ovld lgamma_r(float x, __private int *signp);
-float2 __ovld lgamma_r(float2 x, __private int2 *signp);
-float3 __ovld lgamma_r(float3 x, __private int3 *signp);
-float4 __ovld lgamma_r(float4 x, __private int4 *signp);
-float8 __ovld lgamma_r(float8 x, __private int8 *signp);
-float16 __ovld lgamma_r(float16 x, __private int16 *signp);
-#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, __global int *signp);
-double2 __ovld lgamma_r(double2 x, __global int2 *signp);
-double3 __ovld lgamma_r(double3 x, __global int3 *signp);
-double4 __ovld lgamma_r(double4 x, __global int4 *signp);
-double8 __ovld lgamma_r(double8 x, __global int8 *signp);
-double16 __ovld lgamma_r(double16 x, __global int16 *signp);
-double __ovld lgamma_r(double x, __local int *signp);
-double2 __ovld lgamma_r(double2 x, __local int2 *signp);
-double3 __ovld lgamma_r(double3 x, __local int3 *signp);
-double4 __ovld lgamma_r(double4 x, __local int4 *signp);
-double8 __ovld lgamma_r(double8 x, __local int8 *signp);
-double16 __ovld lgamma_r(double16 x, __local int16 *signp);
-double __ovld lgamma_r(double x, __private int *signp);
-double2 __ovld lgamma_r(double2 x, __private int2 *signp);
-double3 __ovld lgamma_r(double3 x, __private int3 *signp);
-double4 __ovld lgamma_r(double4 x, __private int4 *signp);
-double8 __ovld lgamma_r(double8 x, __private int8 *signp);
-double16 __ovld lgamma_r(double16 x, __private int16 *signp);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, __global int *signp);
-half2 __ovld lgamma_r(half2 x, __global int2 *signp);
-half3 __ovld lgamma_r(half3 x, __global int3 *signp);
-half4 __ovld lgamma_r(half4 x, __global int4 *signp);
-half8 __ovld lgamma_r(half8 x, __global int8 *signp);
-half16 __ovld lgamma_r(half16 x, __global int16 *signp);
-half __ovld lgamma_r(half x, __local int *signp);
-half2 __ovld lgamma_r(half2 x, __local int2 *signp);
-half3 __ovld lgamma_r(half3 x, __local int3 *signp);
-half4 __ovld lgamma_r(half4 x, __local int4 *signp);
-half8 __ovld lgamma_r(half8 x, __local int8 *signp);
-half16 __ovld lgamma_r(half16 x, __local int16 *signp);
-half __ovld lgamma_r(half x, __private int *signp);
-half2 __ovld lgamma_r(half2 x, __private int2 *signp);
-half3 __ovld lgamma_r(half3 x, __private int3 *signp);
-half4 __ovld lgamma_r(half4 x, __private int4 *signp);
-half8 __ovld lgamma_r(half8 x, __private int8 *signp);
-half16 __ovld lgamma_r(half16 x, __private int16 *signp);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
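-
-/* Illustrative example (editorial addition): lgamma loses the sign of
- * gamma(x); lgamma_r recovers it through signp.
- *
- *   __kernel void lgamma_demo(__global float *out, __global int *sgn) {
- *     int s;
- *     out[0] = lgamma_r(-0.5f, &s);  // ln|gamma(-0.5)| = ln(2*sqrt(pi))
- *     sgn[0] = s;                    // -1: gamma(-0.5) is negative
- *   }
- */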
-
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn log(float);
-float2 __ovld __cnfn log(float2);
-float3 __ovld __cnfn log(float3);
-float4 __ovld __cnfn log(float4);
-float8 __ovld __cnfn log(float8);
-float16 __ovld __cnfn log(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log(double);
-double2 __ovld __cnfn log(double2);
-double3 __ovld __cnfn log(double3);
-double4 __ovld __cnfn log(double4);
-double8 __ovld __cnfn log(double8);
-double16 __ovld __cnfn log(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log(half);
-half2 __ovld __cnfn log(half2);
-half3 __ovld __cnfn log(half3);
-half4 __ovld __cnfn log(half4);
-half8 __ovld __cnfn log(half8);
-half16 __ovld __cnfn log(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn log2(float);
-float2 __ovld __cnfn log2(float2);
-float3 __ovld __cnfn log2(float3);
-float4 __ovld __cnfn log2(float4);
-float8 __ovld __cnfn log2(float8);
-float16 __ovld __cnfn log2(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log2(double);
-double2 __ovld __cnfn log2(double2);
-double3 __ovld __cnfn log2(double3);
-double4 __ovld __cnfn log2(double4);
-double8 __ovld __cnfn log2(double8);
-double16 __ovld __cnfn log2(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log2(half);
-half2 __ovld __cnfn log2(half2);
-half3 __ovld __cnfn log2(half3);
-half4 __ovld __cnfn log2(half4);
-half8 __ovld __cnfn log2(half8);
-half16 __ovld __cnfn log2(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn log10(float);
-float2 __ovld __cnfn log10(float2);
-float3 __ovld __cnfn log10(float3);
-float4 __ovld __cnfn log10(float4);
-float8 __ovld __cnfn log10(float8);
-float16 __ovld __cnfn log10(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log10(double);
-double2 __ovld __cnfn log10(double2);
-double3 __ovld __cnfn log10(double3);
-double4 __ovld __cnfn log10(double4);
-double8 __ovld __cnfn log10(double8);
-double16 __ovld __cnfn log10(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log10(half);
-half2 __ovld __cnfn log10(half2);
-half3 __ovld __cnfn log10(half3);
-half4 __ovld __cnfn log10(half4);
-half8 __ovld __cnfn log10(half8);
-half16 __ovld __cnfn log10(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute the base e logarithm of (1.0 + x).
- */
-float __ovld __cnfn log1p(float x);
-float2 __ovld __cnfn log1p(float2 x);
-float3 __ovld __cnfn log1p(float3 x);
-float4 __ovld __cnfn log1p(float4 x);
-float8 __ovld __cnfn log1p(float8 x);
-float16 __ovld __cnfn log1p(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn log1p(double x);
-double2 __ovld __cnfn log1p(double2 x);
-double3 __ovld __cnfn log1p(double3 x);
-double4 __ovld __cnfn log1p(double4 x);
-double8 __ovld __cnfn log1p(double8 x);
-double16 __ovld __cnfn log1p(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn log1p(half x);
-half2 __ovld __cnfn log1p(half2 x);
-half3 __ovld __cnfn log1p(half3 x);
-half4 __ovld __cnfn log1p(half4 x);
-half8 __ovld __cnfn log1p(half8 x);
-half16 __ovld __cnfn log1p(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the exponent of x, which is the integral
- * part of log_r(|x|), where r is the radix of the
- * floating-point format.
- */
-float __ovld __cnfn logb(float x);
-float2 __ovld __cnfn logb(float2 x);
-float3 __ovld __cnfn logb(float3 x);
-float4 __ovld __cnfn logb(float4 x);
-float8 __ovld __cnfn logb(float8 x);
-float16 __ovld __cnfn logb(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn logb(double x);
-double2 __ovld __cnfn logb(double2 x);
-double3 __ovld __cnfn logb(double3 x);
-double4 __ovld __cnfn logb(double4 x);
-double8 __ovld __cnfn logb(double8 x);
-double16 __ovld __cnfn logb(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn logb(half x);
-half2 __ovld __cnfn logb(half2 x);
-half3 __ovld __cnfn logb(half3 x);
-half4 __ovld __cnfn logb(half4 x);
-half8 __ovld __cnfn logb(half8 x);
-half16 __ovld __cnfn logb(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * mad approximates a * b + c. Whether or how the
- * product of a * b is rounded and how supernormal or
- * subnormal intermediate products are handled is not
- * defined. mad is intended to be used where speed is
- * preferred over accuracy.
- */
-float __ovld __cnfn mad(float a, float b, float c);
-float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mad(double a, double b, double c);
-double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mad(half a, half b, half c);
-half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
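-
-/* Illustrative example (editorial addition): mad trades accuracy for
- * speed, so it suits throughput-bound inner loops; prefer fma when a
- * single correctly rounded result matters.
- *
- *   __kernel void axpy(__global float *y, __global const float *x,
- *                      float a) {
- *     size_t i = get_global_id(0);
- *     y[i] = mad(a, x[i], y[i]);  // approximately a*x[i] + y[i]
- *   }
- */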
-
-/**
- * Returns x if |x| > |y|, y if |y| > |x|, otherwise
- * fmax(x, y).
- */
-float __ovld __cnfn maxmag(float x, float y);
-float2 __ovld __cnfn maxmag(float2 x, float2 y);
-float3 __ovld __cnfn maxmag(float3 x, float3 y);
-float4 __ovld __cnfn maxmag(float4 x, float4 y);
-float8 __ovld __cnfn maxmag(float8 x, float8 y);
-float16 __ovld __cnfn maxmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn maxmag(double x, double y);
-double2 __ovld __cnfn maxmag(double2 x, double2 y);
-double3 __ovld __cnfn maxmag(double3 x, double3 y);
-double4 __ovld __cnfn maxmag(double4 x, double4 y);
-double8 __ovld __cnfn maxmag(double8 x, double8 y);
-double16 __ovld __cnfn maxmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn maxmag(half x, half y);
-half2 __ovld __cnfn maxmag(half2 x, half2 y);
-half3 __ovld __cnfn maxmag(half3 x, half3 y);
-half4 __ovld __cnfn maxmag(half4 x, half4 y);
-half8 __ovld __cnfn maxmag(half8 x, half8 y);
-half16 __ovld __cnfn maxmag(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns x if |x| < |y|, y if |y| < |x|, otherwise
- * fmin(x, y).
- */
-float __ovld __cnfn minmag(float x, float y);
-float2 __ovld __cnfn minmag(float2 x, float2 y);
-float3 __ovld __cnfn minmag(float3 x, float3 y);
-float4 __ovld __cnfn minmag(float4 x, float4 y);
-float8 __ovld __cnfn minmag(float8 x, float8 y);
-float16 __ovld __cnfn minmag(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn minmag(double x, double y);
-double2 __ovld __cnfn minmag(double2 x, double2 y);
-double3 __ovld __cnfn minmag(double3 x, double3 y);
-double4 __ovld __cnfn minmag(double4 x, double4 y);
-double8 __ovld __cnfn minmag(double8 x, double8 y);
-double16 __ovld __cnfn minmag(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn minmag(half x, half y);
-half2 __ovld __cnfn minmag(half2 x, half2 y);
-half3 __ovld __cnfn minmag(half3 x, half3 y);
-half4 __ovld __cnfn minmag(half4 x, half4 y);
-half8 __ovld __cnfn minmag(half8 x, half8 y);
-half16 __ovld __cnfn minmag(half16 x, half16 y);
-#endif //cl_khr_fp16
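-
-/* Illustrative example (editorial addition): maxmag/minmag compare by
- * magnitude but return the signed operand.
- *
- *   __kernel void mag_demo(__global float *out) {
- *     out[0] = maxmag(-3.0f, 2.0f);  // -3.0: larger magnitude wins
- *     out[1] = minmag(-3.0f, 2.0f);  //  2.0: smaller magnitude wins
- *   }
- */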
-
-/**
- * Decompose a floating-point number. The modf
- * function breaks the argument x into integral and
- * fractional parts, each of which has the same sign as
- * the argument. It stores the integral part in the object
- * pointed to by iptr.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld modf(float x, float *iptr);
-float2 __ovld modf(float2 x, float2 *iptr);
-float3 __ovld modf(float3 x, float3 *iptr);
-float4 __ovld modf(float4 x, float4 *iptr);
-float8 __ovld modf(float8 x, float8 *iptr);
-float16 __ovld modf(float16 x, float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, double *iptr);
-double2 __ovld modf(double2 x, double2 *iptr);
-double3 __ovld modf(double3 x, double3 *iptr);
-double4 __ovld modf(double4 x, double4 *iptr);
-double8 __ovld modf(double8 x, double8 *iptr);
-double16 __ovld modf(double16 x, double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, half *iptr);
-half2 __ovld modf(half2 x, half2 *iptr);
-half3 __ovld modf(half3 x, half3 *iptr);
-half4 __ovld modf(half4 x, half4 *iptr);
-half8 __ovld modf(half8 x, half8 *iptr);
-half16 __ovld modf(half16 x, half16 *iptr);
-#endif //cl_khr_fp16
-#else
-float __ovld modf(float x, __global float *iptr);
-float2 __ovld modf(float2 x, __global float2 *iptr);
-float3 __ovld modf(float3 x, __global float3 *iptr);
-float4 __ovld modf(float4 x, __global float4 *iptr);
-float8 __ovld modf(float8 x, __global float8 *iptr);
-float16 __ovld modf(float16 x, __global float16 *iptr);
-float __ovld modf(float x, __local float *iptr);
-float2 __ovld modf(float2 x, __local float2 *iptr);
-float3 __ovld modf(float3 x, __local float3 *iptr);
-float4 __ovld modf(float4 x, __local float4 *iptr);
-float8 __ovld modf(float8 x, __local float8 *iptr);
-float16 __ovld modf(float16 x, __local float16 *iptr);
-float __ovld modf(float x, __private float *iptr);
-float2 __ovld modf(float2 x, __private float2 *iptr);
-float3 __ovld modf(float3 x, __private float3 *iptr);
-float4 __ovld modf(float4 x, __private float4 *iptr);
-float8 __ovld modf(float8 x, __private float8 *iptr);
-float16 __ovld modf(float16 x, __private float16 *iptr);
-#ifdef cl_khr_fp64
-double __ovld modf(double x, __global double *iptr);
-double2 __ovld modf(double2 x, __global double2 *iptr);
-double3 __ovld modf(double3 x, __global double3 *iptr);
-double4 __ovld modf(double4 x, __global double4 *iptr);
-double8 __ovld modf(double8 x, __global double8 *iptr);
-double16 __ovld modf(double16 x, __global double16 *iptr);
-double __ovld modf(double x, __local double *iptr);
-double2 __ovld modf(double2 x, __local double2 *iptr);
-double3 __ovld modf(double3 x, __local double3 *iptr);
-double4 __ovld modf(double4 x, __local double4 *iptr);
-double8 __ovld modf(double8 x, __local double8 *iptr);
-double16 __ovld modf(double16 x, __local double16 *iptr);
-double __ovld modf(double x, __private double *iptr);
-double2 __ovld modf(double2 x, __private double2 *iptr);
-double3 __ovld modf(double3 x, __private double3 *iptr);
-double4 __ovld modf(double4 x, __private double4 *iptr);
-double8 __ovld modf(double8 x, __private double8 *iptr);
-double16 __ovld modf(double16 x, __private double16 *iptr);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld modf(half x, __global half *iptr);
-half2 __ovld modf(half2 x, __global half2 *iptr);
-half3 __ovld modf(half3 x, __global half3 *iptr);
-half4 __ovld modf(half4 x, __global half4 *iptr);
-half8 __ovld modf(half8 x, __global half8 *iptr);
-half16 __ovld modf(half16 x, __global half16 *iptr);
-half __ovld modf(half x, __local half *iptr);
-half2 __ovld modf(half2 x, __local half2 *iptr);
-half3 __ovld modf(half3 x, __local half3 *iptr);
-half4 __ovld modf(half4 x, __local half4 *iptr);
-half8 __ovld modf(half8 x, __local half8 *iptr);
-half16 __ovld modf(half16 x, __local half16 *iptr);
-half __ovld modf(half x, __private half *iptr);
-half2 __ovld modf(half2 x, __private half2 *iptr);
-half3 __ovld modf(half3 x, __private half3 *iptr);
-half4 __ovld modf(half4 x, __private half4 *iptr);
-half8 __ovld modf(half8 x, __private half8 *iptr);
-half16 __ovld modf(half16 x, __private half16 *iptr);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
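-
-/* Illustrative example (editorial addition): unlike fract, both parts
- * produced by modf carry the sign of x.
- *
- *   __kernel void modf_demo(__global float *out) {
- *     float ip;
- *     out[0] = modf(-2.75f, &ip);  // -0.75: fractional part
- *     out[1] = ip;                 // -2.0:  integral part
- *   }
- */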
-
-/**
- * Returns a quiet NaN. The nancode may be placed
- * in the significand of the resulting NaN.
- */
-float __ovld __cnfn nan(uint nancode);
-float2 __ovld __cnfn nan(uint2 nancode);
-float3 __ovld __cnfn nan(uint3 nancode);
-float4 __ovld __cnfn nan(uint4 nancode);
-float8 __ovld __cnfn nan(uint8 nancode);
-float16 __ovld __cnfn nan(uint16 nancode);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nan(ulong nancode);
-double2 __ovld __cnfn nan(ulong2 nancode);
-double3 __ovld __cnfn nan(ulong3 nancode);
-double4 __ovld __cnfn nan(ulong4 nancode);
-double8 __ovld __cnfn nan(ulong8 nancode);
-double16 __ovld __cnfn nan(ulong16 nancode);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nan(ushort nancode);
-half2 __ovld __cnfn nan(ushort2 nancode);
-half3 __ovld __cnfn nan(ushort3 nancode);
-half4 __ovld __cnfn nan(ushort4 nancode);
-half8 __ovld __cnfn nan(ushort8 nancode);
-half16 __ovld __cnfn nan(ushort16 nancode);
-#endif //cl_khr_fp16
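-
-/* Illustrative example (editorial addition): nan() builds a quiet NaN,
- * optionally tagged with a payload; whether the nancode survives in the
- * significand is implementation-defined.
- *
- *   __kernel void nan_demo(__global float *out) {
- *     float q = nan(42u);               // quiet NaN, payload 42 if kept
- *     out[0] = isnan(q) ? 1.0f : 0.0f;  // 1.0
- *   }
- */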
-
-/**
- * Computes the next representable floating-point
- * value following x in the direction of y. Thus, if y
- * is less than x, nextafter() returns the largest
- * representable floating-point number less than x.
- */
-float __ovld __cnfn nextafter(float x, float y);
-float2 __ovld __cnfn nextafter(float2 x, float2 y);
-float3 __ovld __cnfn nextafter(float3 x, float3 y);
-float4 __ovld __cnfn nextafter(float4 x, float4 y);
-float8 __ovld __cnfn nextafter(float8 x, float8 y);
-float16 __ovld __cnfn nextafter(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn nextafter(double x, double y);
-double2 __ovld __cnfn nextafter(double2 x, double2 y);
-double3 __ovld __cnfn nextafter(double3 x, double3 y);
-double4 __ovld __cnfn nextafter(double4 x, double4 y);
-double8 __ovld __cnfn nextafter(double8 x, double8 y);
-double16 __ovld __cnfn nextafter(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn nextafter(half x, half y);
-half2 __ovld __cnfn nextafter(half2 x, half2 y);
-half3 __ovld __cnfn nextafter(half3 x, half3 y);
-half4 __ovld __cnfn nextafter(half4 x, half4 y);
-half8 __ovld __cnfn nextafter(half8 x, half8 y);
-half16 __ovld __cnfn nextafter(half16 x, half16 y);
-#endif //cl_khr_fp16
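-
-/* Illustrative example (editorial addition): nextafter steps exactly one
- * representable value toward y, which is handy for probing ulp boundaries.
- *
- *   __kernel void nextafter_demo(__global float *out) {
- *     out[0] = nextafter(1.0f, 2.0f);  // 1 + 2^-23, next float above 1.0
- *     out[1] = nextafter(1.0f, 0.0f);  // 1 - 2^-24, next float below 1.0
- *   }
- */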
-
-/**
- * Compute x to the power y.
- */
-float __ovld __cnfn pow(float x, float y);
-float2 __ovld __cnfn pow(float2 x, float2 y);
-float3 __ovld __cnfn pow(float3 x, float3 y);
-float4 __ovld __cnfn pow(float4 x, float4 y);
-float8 __ovld __cnfn pow(float8 x, float8 y);
-float16 __ovld __cnfn pow(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pow(double x, double y);
-double2 __ovld __cnfn pow(double2 x, double2 y);
-double3 __ovld __cnfn pow(double3 x, double3 y);
-double4 __ovld __cnfn pow(double4 x, double4 y);
-double8 __ovld __cnfn pow(double8 x, double8 y);
-double16 __ovld __cnfn pow(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pow(half x, half y);
-half2 __ovld __cnfn pow(half2 x, half2 y);
-half3 __ovld __cnfn pow(half3 x, half3 y);
-half4 __ovld __cnfn pow(half4 x, half4 y);
-half8 __ovld __cnfn pow(half8 x, half8 y);
-half16 __ovld __cnfn pow(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where y is an integer.
- */
-float __ovld __cnfn pown(float x, int y);
-float2 __ovld __cnfn pown(float2 x, int2 y);
-float3 __ovld __cnfn pown(float3 x, int3 y);
-float4 __ovld __cnfn pown(float4 x, int4 y);
-float8 __ovld __cnfn pown(float8 x, int8 y);
-float16 __ovld __cnfn pown(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn pown(double x, int y);
-double2 __ovld __cnfn pown(double2 x, int2 y);
-double3 __ovld __cnfn pown(double3 x, int3 y);
-double4 __ovld __cnfn pown(double4 x, int4 y);
-double8 __ovld __cnfn pown(double8 x, int8 y);
-double16 __ovld __cnfn pown(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn pown(half x, int y);
-half2 __ovld __cnfn pown(half2 x, int2 y);
-half3 __ovld __cnfn pown(half3 x, int3 y);
-half4 __ovld __cnfn pown(half4 x, int4 y);
-half8 __ovld __cnfn pown(half8 x, int8 y);
-half16 __ovld __cnfn pown(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power y, where x >= 0.
- */
-float __ovld __cnfn powr(float x, float y);
-float2 __ovld __cnfn powr(float2 x, float2 y);
-float3 __ovld __cnfn powr(float3 x, float3 y);
-float4 __ovld __cnfn powr(float4 x, float4 y);
-float8 __ovld __cnfn powr(float8 x, float8 y);
-float16 __ovld __cnfn powr(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn powr(double x, double y);
-double2 __ovld __cnfn powr(double2 x, double2 y);
-double3 __ovld __cnfn powr(double3 x, double3 y);
-double4 __ovld __cnfn powr(double4 x, double4 y);
-double8 __ovld __cnfn powr(double8 x, double8 y);
-double16 __ovld __cnfn powr(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn powr(half x, half y);
-half2 __ovld __cnfn powr(half2 x, half2 y);
-half3 __ovld __cnfn powr(half3 x, half3 y);
-half4 __ovld __cnfn powr(half4 x, half4 y);
-half8 __ovld __cnfn powr(half8 x, half8 y);
-half16 __ovld __cnfn powr(half16 x, half16 y);
-#endif //cl_khr_fp16
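-
-/* Illustrative example (editorial addition): the three power variants
- * trade generality for speed: pown takes an integer exponent and is
- * defined for negative x, while powr assumes x >= 0 so it can be
- * computed as exp2(y * log2(x)).
- *
- *   __kernel void pow_demo(__global float *out, float x) {
- *     out[0] = pow(x, 2.5f);   // general case
- *     out[1] = pown(x, 3);     // integer exponent, works for x < 0
- *     out[2] = powr(x, 2.5f);  // requires x >= 0
- *   }
- */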
-
-/**
- * Compute the value r such that r = x - n*y, where n
- * is the integer nearest the exact value of x/y. If there
- * are two integers closest to x/y, n shall be the even
- * one. If r is zero, it is given the same sign as x.
- */
-float __ovld __cnfn remainder(float x, float y);
-float2 __ovld __cnfn remainder(float2 x, float2 y);
-float3 __ovld __cnfn remainder(float3 x, float3 y);
-float4 __ovld __cnfn remainder(float4 x, float4 y);
-float8 __ovld __cnfn remainder(float8 x, float8 y);
-float16 __ovld __cnfn remainder(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn remainder(double x, double y);
-double2 __ovld __cnfn remainder(double2 x, double2 y);
-double3 __ovld __cnfn remainder(double3 x, double3 y);
-double4 __ovld __cnfn remainder(double4 x, double4 y);
-double8 __ovld __cnfn remainder(double8 x, double8 y);
-double16 __ovld __cnfn remainder(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn remainder(half x, half y);
-half2 __ovld __cnfn remainder(half2 x, half2 y);
-half3 __ovld __cnfn remainder(half3 x, half3 y);
-half4 __ovld __cnfn remainder(half4 x, half4 y);
-half8 __ovld __cnfn remainder(half8 x, half8 y);
-half16 __ovld __cnfn remainder(half16 x, half16 y);
-#endif //cl_khr_fp16
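-
-/* Illustrative example (editorial addition): remainder rounds x/y to the
- * nearest integer (ties to even), so its result lies in [-|y|/2, |y|/2]
- * and can be negative where fmod is positive.
- *
- *   __kernel void rem_demo(__global float *out) {
- *     out[0] = remainder(5.5f, 2.0f);  // -0.5: n = 3, nearest to 2.75
- *     out[1] = fmod(5.5f, 2.0f);       //  1.5: trunc(2.75) = 2
- *   }
- */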
-
-/**
- * The remquo function computes the value r such
- * that r = x - n*y, where n is the integer nearest the
- * exact value of x/y. If there are two integers closest
- * to x/y, n shall be the even one. If r is zero, it is
- * given the same sign as x. This is the same value
- * that is returned by the remainder function.
- * remquo also calculates the lower seven bits of the
- * integral quotient x/y, and gives that value the same
- * sign as x/y. It stores this signed value in the object
- * pointed to by quo.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld remquo(float x, float y, int *quo);
-float2 __ovld remquo(float2 x, float2 y, int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, int *quo);
-double2 __ovld remquo(double2 x, double2 y, int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, int *quo);
-half2 __ovld remquo(half2 x, half2 y, int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-#endif //cl_khr_fp16
-#else
-float __ovld remquo(float x, float y, __global int *quo);
-float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
-float __ovld remquo(float x, float y, __local int *quo);
-float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
-float __ovld remquo(float x, float y, __private int *quo);
-float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
-#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, __global int *quo);
-double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
-double __ovld remquo(double x, double y, __local int *quo);
-double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
-double __ovld remquo(double x, double y, __private int *quo);
-double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, __global int *quo);
-half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
-half __ovld remquo(half x, half y, __local int *quo);
-half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
-half __ovld remquo(half x, half y, __private int *quo);
-half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
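-
-/* Illustrative example (editorial addition): remquo returns the same
- * value as remainder and additionally reports the low bits of the
- * rounded quotient, as used in trigonometric argument reduction.
- *
- *   __kernel void remquo_demo(__global float *out, __global int *q) {
- *     int quo;
- *     out[0] = remquo(5.5f, 2.0f, &quo);  // -0.5
- *     q[0] = quo;                         // 3: quotient bits, sign of x/y
- *   }
- */
-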
-/**
- * Round to integral value (using round to nearest
- * even rounding mode) in floating-point format.
- * Refer to section 7.1 for description of rounding
- * modes.
- */
-float __ovld __cnfn rint(float);
-float2 __ovld __cnfn rint(float2);
-float3 __ovld __cnfn rint(float3);
-float4 __ovld __cnfn rint(float4);
-float8 __ovld __cnfn rint(float8);
-float16 __ovld __cnfn rint(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rint(double);
-double2 __ovld __cnfn rint(double2);
-double3 __ovld __cnfn rint(double3);
-double4 __ovld __cnfn rint(double4);
-double8 __ovld __cnfn rint(double8);
-double16 __ovld __cnfn rint(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rint(half);
-half2 __ovld __cnfn rint(half2);
-half3 __ovld __cnfn rint(half3);
-half4 __ovld __cnfn rint(half4);
-half8 __ovld __cnfn rint(half8);
-half16 __ovld __cnfn rint(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute x to the power 1/y.
- */
-float __ovld __cnfn rootn(float x, int y);
-float2 __ovld __cnfn rootn(float2 x, int2 y);
-float3 __ovld __cnfn rootn(float3 x, int3 y);
-float4 __ovld __cnfn rootn(float4 x, int4 y);
-float8 __ovld __cnfn rootn(float8 x, int8 y);
-float16 __ovld __cnfn rootn(float16 x, int16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rootn(double x, int y);
-double2 __ovld __cnfn rootn(double2 x, int2 y);
-double3 __ovld __cnfn rootn(double3 x, int3 y);
-double4 __ovld __cnfn rootn(double4 x, int4 y);
-double8 __ovld __cnfn rootn(double8 x, int8 y);
-double16 __ovld __cnfn rootn(double16 x, int16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rootn(half x, int y);
-half2 __ovld __cnfn rootn(half2 x, int2 y);
-half3 __ovld __cnfn rootn(half3 x, int3 y);
-half4 __ovld __cnfn rootn(half4 x, int4 y);
-half8 __ovld __cnfn rootn(half8 x, int8 y);
-half16 __ovld __cnfn rootn(half16 x, int16 y);
-#endif //cl_khr_fp16
-
-/**
- * Return the integral value nearest to x rounding
- * halfway cases away from zero, regardless of the
- * current rounding direction.
- */
-float __ovld __cnfn round(float x);
-float2 __ovld __cnfn round(float2 x);
-float3 __ovld __cnfn round(float3 x);
-float4 __ovld __cnfn round(float4 x);
-float8 __ovld __cnfn round(float8 x);
-float16 __ovld __cnfn round(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn round(double x);
-double2 __ovld __cnfn round(double2 x);
-double3 __ovld __cnfn round(double3 x);
-double4 __ovld __cnfn round(double4 x);
-double8 __ovld __cnfn round(double8 x);
-double16 __ovld __cnfn round(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn round(half x);
-half2 __ovld __cnfn round(half2 x);
-half3 __ovld __cnfn round(half3 x);
-half4 __ovld __cnfn round(half4 x);
-half8 __ovld __cnfn round(half8 x);
-half16 __ovld __cnfn round(half16 x);
-#endif //cl_khr_fp16
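-
-/* Illustrative example (editorial addition): round and rint differ only
- * in how halfway cases are resolved.
- *
- *   __kernel void round_demo(__global float *out) {
- *     out[0] = rint(2.5f);   // 2.0: ties to even
- *     out[1] = round(2.5f);  // 3.0: ties away from zero
- *   }
- */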
-
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn rsqrt(float);
-float2 __ovld __cnfn rsqrt(float2);
-float3 __ovld __cnfn rsqrt(float3);
-float4 __ovld __cnfn rsqrt(float4);
-float8 __ovld __cnfn rsqrt(float8);
-float16 __ovld __cnfn rsqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn rsqrt(double);
-double2 __ovld __cnfn rsqrt(double2);
-double3 __ovld __cnfn rsqrt(double3);
-double4 __ovld __cnfn rsqrt(double4);
-double8 __ovld __cnfn rsqrt(double8);
-double16 __ovld __cnfn rsqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn rsqrt(half);
-half2 __ovld __cnfn rsqrt(half2);
-half3 __ovld __cnfn rsqrt(half3);
-half4 __ovld __cnfn rsqrt(half4);
-half8 __ovld __cnfn rsqrt(half8);
-half16 __ovld __cnfn rsqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine.
- */
-float __ovld __cnfn sin(float);
-float2 __ovld __cnfn sin(float2);
-float3 __ovld __cnfn sin(float3);
-float4 __ovld __cnfn sin(float4);
-float8 __ovld __cnfn sin(float8);
-float16 __ovld __cnfn sin(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sin(double);
-double2 __ovld __cnfn sin(double2);
-double3 __ovld __cnfn sin(double3);
-double4 __ovld __cnfn sin(double4);
-double8 __ovld __cnfn sin(double8);
-double16 __ovld __cnfn sin(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sin(half);
-half2 __ovld __cnfn sin(half2);
-half3 __ovld __cnfn sin(half3);
-half4 __ovld __cnfn sin(half4);
-half8 __ovld __cnfn sin(half8);
-half16 __ovld __cnfn sin(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sine and cosine of x. The computed sine
- * is the return value and computed cosine is returned
- * in cosval.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld sincos(float x, float *cosval);
-float2 __ovld sincos(float2 x, float2 *cosval);
-float3 __ovld sincos(float3 x, float3 *cosval);
-float4 __ovld sincos(float4 x, float4 *cosval);
-float8 __ovld sincos(float8 x, float8 *cosval);
-float16 __ovld sincos(float16 x, float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, double *cosval);
-double2 __ovld sincos(double2 x, double2 *cosval);
-double3 __ovld sincos(double3 x, double3 *cosval);
-double4 __ovld sincos(double4 x, double4 *cosval);
-double8 __ovld sincos(double8 x, double8 *cosval);
-double16 __ovld sincos(double16 x, double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, half *cosval);
-half2 __ovld sincos(half2 x, half2 *cosval);
-half3 __ovld sincos(half3 x, half3 *cosval);
-half4 __ovld sincos(half4 x, half4 *cosval);
-half8 __ovld sincos(half8 x, half8 *cosval);
-half16 __ovld sincos(half16 x, half16 *cosval);
-#endif //cl_khr_fp16
-#else
-float __ovld sincos(float x, __global float *cosval);
-float2 __ovld sincos(float2 x, __global float2 *cosval);
-float3 __ovld sincos(float3 x, __global float3 *cosval);
-float4 __ovld sincos(float4 x, __global float4 *cosval);
-float8 __ovld sincos(float8 x, __global float8 *cosval);
-float16 __ovld sincos(float16 x, __global float16 *cosval);
-float __ovld sincos(float x, __local float *cosval);
-float2 __ovld sincos(float2 x, __local float2 *cosval);
-float3 __ovld sincos(float3 x, __local float3 *cosval);
-float4 __ovld sincos(float4 x, __local float4 *cosval);
-float8 __ovld sincos(float8 x, __local float8 *cosval);
-float16 __ovld sincos(float16 x, __local float16 *cosval);
-float __ovld sincos(float x, __private float *cosval);
-float2 __ovld sincos(float2 x, __private float2 *cosval);
-float3 __ovld sincos(float3 x, __private float3 *cosval);
-float4 __ovld sincos(float4 x, __private float4 *cosval);
-float8 __ovld sincos(float8 x, __private float8 *cosval);
-float16 __ovld sincos(float16 x, __private float16 *cosval);
-#ifdef cl_khr_fp64
-double __ovld sincos(double x, __global double *cosval);
-double2 __ovld sincos(double2 x, __global double2 *cosval);
-double3 __ovld sincos(double3 x, __global double3 *cosval);
-double4 __ovld sincos(double4 x, __global double4 *cosval);
-double8 __ovld sincos(double8 x, __global double8 *cosval);
-double16 __ovld sincos(double16 x, __global double16 *cosval);
-double __ovld sincos(double x, __local double *cosval);
-double2 __ovld sincos(double2 x, __local double2 *cosval);
-double3 __ovld sincos(double3 x, __local double3 *cosval);
-double4 __ovld sincos(double4 x, __local double4 *cosval);
-double8 __ovld sincos(double8 x, __local double8 *cosval);
-double16 __ovld sincos(double16 x, __local double16 *cosval);
-double __ovld sincos(double x, __private double *cosval);
-double2 __ovld sincos(double2 x, __private double2 *cosval);
-double3 __ovld sincos(double3 x, __private double3 *cosval);
-double4 __ovld sincos(double4 x, __private double4 *cosval);
-double8 __ovld sincos(double8 x, __private double8 *cosval);
-double16 __ovld sincos(double16 x, __private double16 *cosval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld sincos(half x, __global half *cosval);
-half2 __ovld sincos(half2 x, __global half2 *cosval);
-half3 __ovld sincos(half3 x, __global half3 *cosval);
-half4 __ovld sincos(half4 x, __global half4 *cosval);
-half8 __ovld sincos(half8 x, __global half8 *cosval);
-half16 __ovld sincos(half16 x, __global half16 *cosval);
-half __ovld sincos(half x, __local half *cosval);
-half2 __ovld sincos(half2 x, __local half2 *cosval);
-half3 __ovld sincos(half3 x, __local half3 *cosval);
-half4 __ovld sincos(half4 x, __local half4 *cosval);
-half8 __ovld sincos(half8 x, __local half8 *cosval);
-half16 __ovld sincos(half16 x, __local half16 *cosval);
-half __ovld sincos(half x, __private half *cosval);
-half2 __ovld sincos(half2 x, __private half2 *cosval);
-half3 __ovld sincos(half3 x, __private half3 *cosval);
-half4 __ovld sincos(half4 x, __private half4 *cosval);
-half8 __ovld sincos(half8 x, __private half8 *cosval);
-half16 __ovld sincos(half16 x, __private half16 *cosval);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
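-/*
- * Illustrative usage (editor's sketch, not part of the original header):
- * one sincos call yields both values; the address-space overloads above
- * accept a stack variable via a __private (or, in OpenCL 2.0, generic)
- * pointer. The kernel name and identifiers below are hypothetical.
- *
- *   __kernel void sin_cos(__global const float *in,
- *                         __global float *s, __global float *c) {
- *     size_t gid = get_global_id(0);
- *     float cv;
- *     s[gid] = sincos(in[gid], &cv); // sine returned, cosine stored in cv
- *     c[gid] = cv;
- *   }
- */
-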
-/**
- * Compute hyperbolic sine.
- */
-float __ovld __cnfn sinh(float);
-float2 __ovld __cnfn sinh(float2);
-float3 __ovld __cnfn sinh(float3);
-float4 __ovld __cnfn sinh(float4);
-float8 __ovld __cnfn sinh(float8);
-float16 __ovld __cnfn sinh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinh(double);
-double2 __ovld __cnfn sinh(double2);
-double3 __ovld __cnfn sinh(double3);
-double4 __ovld __cnfn sinh(double4);
-double8 __ovld __cnfn sinh(double8);
-double16 __ovld __cnfn sinh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinh(half);
-half2 __ovld __cnfn sinh(half2);
-half3 __ovld __cnfn sinh(half3);
-half4 __ovld __cnfn sinh(half4);
-half8 __ovld __cnfn sinh(half8);
-half16 __ovld __cnfn sinh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute sin (PI * x).
- */
-float __ovld __cnfn sinpi(float x);
-float2 __ovld __cnfn sinpi(float2 x);
-float3 __ovld __cnfn sinpi(float3 x);
-float4 __ovld __cnfn sinpi(float4 x);
-float8 __ovld __cnfn sinpi(float8 x);
-float16 __ovld __cnfn sinpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sinpi(double x);
-double2 __ovld __cnfn sinpi(double2 x);
-double3 __ovld __cnfn sinpi(double3 x);
-double4 __ovld __cnfn sinpi(double4 x);
-double8 __ovld __cnfn sinpi(double8 x);
-double16 __ovld __cnfn sinpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sinpi(half x);
-half2 __ovld __cnfn sinpi(half2 x);
-half3 __ovld __cnfn sinpi(half3 x);
-half4 __ovld __cnfn sinpi(half4 x);
-half8 __ovld __cnfn sinpi(half8 x);
-half16 __ovld __cnfn sinpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn sqrt(float);
-float2 __ovld __cnfn sqrt(float2);
-float3 __ovld __cnfn sqrt(float3);
-float4 __ovld __cnfn sqrt(float4);
-float8 __ovld __cnfn sqrt(float8);
-float16 __ovld __cnfn sqrt(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sqrt(double);
-double2 __ovld __cnfn sqrt(double2);
-double3 __ovld __cnfn sqrt(double3);
-double4 __ovld __cnfn sqrt(double4);
-double8 __ovld __cnfn sqrt(double8);
-double16 __ovld __cnfn sqrt(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sqrt(half);
-half2 __ovld __cnfn sqrt(half2);
-half3 __ovld __cnfn sqrt(half3);
-half4 __ovld __cnfn sqrt(half4);
-half8 __ovld __cnfn sqrt(half8);
-half16 __ovld __cnfn sqrt(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tangent.
- */
-float __ovld __cnfn tan(float);
-float2 __ovld __cnfn tan(float2);
-float3 __ovld __cnfn tan(float3);
-float4 __ovld __cnfn tan(float4);
-float8 __ovld __cnfn tan(float8);
-float16 __ovld __cnfn tan(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tan(double);
-double2 __ovld __cnfn tan(double2);
-double3 __ovld __cnfn tan(double3);
-double4 __ovld __cnfn tan(double4);
-double8 __ovld __cnfn tan(double8);
-double16 __ovld __cnfn tan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tan(half);
-half2 __ovld __cnfn tan(half2);
-half3 __ovld __cnfn tan(half3);
-half4 __ovld __cnfn tan(half4);
-half8 __ovld __cnfn tan(half8);
-half16 __ovld __cnfn tan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute hyperbolic tangent.
- */
-float __ovld __cnfn tanh(float);
-float2 __ovld __cnfn tanh(float2);
-float3 __ovld __cnfn tanh(float3);
-float4 __ovld __cnfn tanh(float4);
-float8 __ovld __cnfn tanh(float8);
-float16 __ovld __cnfn tanh(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanh(double);
-double2 __ovld __cnfn tanh(double2);
-double3 __ovld __cnfn tanh(double3);
-double4 __ovld __cnfn tanh(double4);
-double8 __ovld __cnfn tanh(double8);
-double16 __ovld __cnfn tanh(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanh(half);
-half2 __ovld __cnfn tanh(half2);
-half3 __ovld __cnfn tanh(half3);
-half4 __ovld __cnfn tanh(half4);
-half8 __ovld __cnfn tanh(half8);
-half16 __ovld __cnfn tanh(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute tan (PI * x).
- */
-float __ovld __cnfn tanpi(float x);
-float2 __ovld __cnfn tanpi(float2 x);
-float3 __ovld __cnfn tanpi(float3 x);
-float4 __ovld __cnfn tanpi(float4 x);
-float8 __ovld __cnfn tanpi(float8 x);
-float16 __ovld __cnfn tanpi(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tanpi(double x);
-double2 __ovld __cnfn tanpi(double2 x);
-double3 __ovld __cnfn tanpi(double3 x);
-double4 __ovld __cnfn tanpi(double4 x);
-double8 __ovld __cnfn tanpi(double8 x);
-double16 __ovld __cnfn tanpi(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tanpi(half x);
-half2 __ovld __cnfn tanpi(half2 x);
-half3 __ovld __cnfn tanpi(half3 x);
-half4 __ovld __cnfn tanpi(half4 x);
-half8 __ovld __cnfn tanpi(half8 x);
-half16 __ovld __cnfn tanpi(half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Compute the gamma function.
- */
-float __ovld __cnfn tgamma(float);
-float2 __ovld __cnfn tgamma(float2);
-float3 __ovld __cnfn tgamma(float3);
-float4 __ovld __cnfn tgamma(float4);
-float8 __ovld __cnfn tgamma(float8);
-float16 __ovld __cnfn tgamma(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn tgamma(double);
-double2 __ovld __cnfn tgamma(double2);
-double3 __ovld __cnfn tgamma(double3);
-double4 __ovld __cnfn tgamma(double4);
-double8 __ovld __cnfn tgamma(double8);
-double16 __ovld __cnfn tgamma(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn tgamma(half);
-half2 __ovld __cnfn tgamma(half2);
-half3 __ovld __cnfn tgamma(half3);
-half4 __ovld __cnfn tgamma(half4);
-half8 __ovld __cnfn tgamma(half8);
-half16 __ovld __cnfn tgamma(half16);
-#endif //cl_khr_fp16
-
-/**
- * Round to integral value using the round to zero
- * rounding mode.
- */
-float __ovld __cnfn trunc(float);
-float2 __ovld __cnfn trunc(float2);
-float3 __ovld __cnfn trunc(float3);
-float4 __ovld __cnfn trunc(float4);
-float8 __ovld __cnfn trunc(float8);
-float16 __ovld __cnfn trunc(float16);
-#ifdef cl_khr_fp64
-double __ovld __cnfn trunc(double);
-double2 __ovld __cnfn trunc(double2);
-double3 __ovld __cnfn trunc(double3);
-double4 __ovld __cnfn trunc(double4);
-double8 __ovld __cnfn trunc(double8);
-double16 __ovld __cnfn trunc(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn trunc(half);
-half2 __ovld __cnfn trunc(half2);
-half3 __ovld __cnfn trunc(half3);
-half4 __ovld __cnfn trunc(half4);
-half8 __ovld __cnfn trunc(half8);
-half16 __ovld __cnfn trunc(half16);
-#endif //cl_khr_fp16
-
-/**
- * Compute cosine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_cos(float x);
-float2 __ovld __cnfn half_cos(float2 x);
-float3 __ovld __cnfn half_cos(float3 x);
-float4 __ovld __cnfn half_cos(float4 x);
-float8 __ovld __cnfn half_cos(float8 x);
-float16 __ovld __cnfn half_cos(float16 x);
-
-/**
- * Compute x / y.
- */
-float __ovld __cnfn half_divide(float x, float y);
-float2 __ovld __cnfn half_divide(float2 x, float2 y);
-float3 __ovld __cnfn half_divide(float3 x, float3 y);
-float4 __ovld __cnfn half_divide(float4 x, float4 y);
-float8 __ovld __cnfn half_divide(float8 x, float8 y);
-float16 __ovld __cnfn half_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x.
- */
-float __ovld __cnfn half_exp(float x);
-float2 __ovld __cnfn half_exp(float2 x);
-float3 __ovld __cnfn half_exp(float3 x);
-float4 __ovld __cnfn half_exp(float4 x);
-float8 __ovld __cnfn half_exp(float8 x);
-float16 __ovld __cnfn half_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x.
- */
-float __ovld __cnfn half_exp2(float x);
-float2 __ovld __cnfn half_exp2(float2 x);
-float3 __ovld __cnfn half_exp2(float3 x);
-float4 __ovld __cnfn half_exp2(float4 x);
-float8 __ovld __cnfn half_exp2(float8 x);
-float16 __ovld __cnfn half_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x.
- */
-float __ovld __cnfn half_exp10(float x);
-float2 __ovld __cnfn half_exp10(float2 x);
-float3 __ovld __cnfn half_exp10(float3 x);
-float4 __ovld __cnfn half_exp10(float4 x);
-float8 __ovld __cnfn half_exp10(float8 x);
-float16 __ovld __cnfn half_exp10(float16 x);
-
-/**
- * Compute natural logarithm.
- */
-float __ovld __cnfn half_log(float x);
-float2 __ovld __cnfn half_log(float2 x);
-float3 __ovld __cnfn half_log(float3 x);
-float4 __ovld __cnfn half_log(float4 x);
-float8 __ovld __cnfn half_log(float8 x);
-float16 __ovld __cnfn half_log(float16 x);
-
-/**
- * Compute a base 2 logarithm.
- */
-float __ovld __cnfn half_log2(float x);
-float2 __ovld __cnfn half_log2(float2 x);
-float3 __ovld __cnfn half_log2(float3 x);
-float4 __ovld __cnfn half_log2(float4 x);
-float8 __ovld __cnfn half_log2(float8 x);
-float16 __ovld __cnfn half_log2(float16 x);
-
-/**
- * Compute a base 10 logarithm.
- */
-float __ovld __cnfn half_log10(float x);
-float2 __ovld __cnfn half_log10(float2 x);
-float3 __ovld __cnfn half_log10(float3 x);
-float4 __ovld __cnfn half_log10(float4 x);
-float8 __ovld __cnfn half_log10(float8 x);
-float16 __ovld __cnfn half_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0.
- */
-float __ovld __cnfn half_powr(float x, float y);
-float2 __ovld __cnfn half_powr(float2 x, float2 y);
-float3 __ovld __cnfn half_powr(float3 x, float3 y);
-float4 __ovld __cnfn half_powr(float4 x, float4 y);
-float8 __ovld __cnfn half_powr(float8 x, float8 y);
-float16 __ovld __cnfn half_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal.
- */
-float __ovld __cnfn half_recip(float x);
-float2 __ovld __cnfn half_recip(float2 x);
-float3 __ovld __cnfn half_recip(float3 x);
-float4 __ovld __cnfn half_recip(float4 x);
-float8 __ovld __cnfn half_recip(float8 x);
-float16 __ovld __cnfn half_recip(float16 x);
-
-/**
- * Compute inverse square root.
- */
-float __ovld __cnfn half_rsqrt(float x);
-float2 __ovld __cnfn half_rsqrt(float2 x);
-float3 __ovld __cnfn half_rsqrt(float3 x);
-float4 __ovld __cnfn half_rsqrt(float4 x);
-float8 __ovld __cnfn half_rsqrt(float8 x);
-float16 __ovld __cnfn half_rsqrt(float16 x);
-
-/**
- * Compute sine. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_sin(float x);
-float2 __ovld __cnfn half_sin(float2 x);
-float3 __ovld __cnfn half_sin(float3 x);
-float4 __ovld __cnfn half_sin(float4 x);
-float8 __ovld __cnfn half_sin(float8 x);
-float16 __ovld __cnfn half_sin(float16 x);
-
-/**
- * Compute square root.
- */
-float __ovld __cnfn half_sqrt(float x);
-float2 __ovld __cnfn half_sqrt(float2 x);
-float3 __ovld __cnfn half_sqrt(float3 x);
-float4 __ovld __cnfn half_sqrt(float4 x);
-float8 __ovld __cnfn half_sqrt(float8 x);
-float16 __ovld __cnfn half_sqrt(float16 x);
-
-/**
- * Compute tangent. x must be in the range -2^16 ... +2^16.
- */
-float __ovld __cnfn half_tan(float x);
-float2 __ovld __cnfn half_tan(float2 x);
-float3 __ovld __cnfn half_tan(float3 x);
-float4 __ovld __cnfn half_tan(float4 x);
-float8 __ovld __cnfn half_tan(float8 x);
-float16 __ovld __cnfn half_tan(float16 x);
-
-/**
- * Compute cosine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_cos(float x);
-float2 __ovld __cnfn native_cos(float2 x);
-float3 __ovld __cnfn native_cos(float3 x);
-float4 __ovld __cnfn native_cos(float4 x);
-float8 __ovld __cnfn native_cos(float8 x);
-float16 __ovld __cnfn native_cos(float16 x);
-
-/**
- * Compute x / y over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_divide(float x, float y);
-float2 __ovld __cnfn native_divide(float2 x, float2 y);
-float3 __ovld __cnfn native_divide(float3 x, float3 y);
-float4 __ovld __cnfn native_divide(float4 x, float4 y);
-float8 __ovld __cnfn native_divide(float8 x, float8 y);
-float16 __ovld __cnfn native_divide(float16 x, float16 y);
-
-/**
- * Compute the base-e exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp(float x);
-float2 __ovld __cnfn native_exp(float2 x);
-float3 __ovld __cnfn native_exp(float3 x);
-float4 __ovld __cnfn native_exp(float4 x);
-float8 __ovld __cnfn native_exp(float8 x);
-float16 __ovld __cnfn native_exp(float16 x);
-
-/**
- * Compute the base-2 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp2(float x);
-float2 __ovld __cnfn native_exp2(float2 x);
-float3 __ovld __cnfn native_exp2(float3 x);
-float4 __ovld __cnfn native_exp2(float4 x);
-float8 __ovld __cnfn native_exp2(float8 x);
-float16 __ovld __cnfn native_exp2(float16 x);
-
-/**
- * Compute the base-10 exponential of x over an
- * implementation-defined range. The maximum error is
- * implementation-defined.
- */
-float __ovld __cnfn native_exp10(float x);
-float2 __ovld __cnfn native_exp10(float2 x);
-float3 __ovld __cnfn native_exp10(float3 x);
-float4 __ovld __cnfn native_exp10(float4 x);
-float8 __ovld __cnfn native_exp10(float8 x);
-float16 __ovld __cnfn native_exp10(float16 x);
-
-/**
- * Compute natural logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log(float x);
-float2 __ovld __cnfn native_log(float2 x);
-float3 __ovld __cnfn native_log(float3 x);
-float4 __ovld __cnfn native_log(float4 x);
-float8 __ovld __cnfn native_log(float8 x);
-float16 __ovld __cnfn native_log(float16 x);
-
-/**
- * Compute a base 2 logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log2(float x);
-float2 __ovld __cnfn native_log2(float2 x);
-float3 __ovld __cnfn native_log2(float3 x);
-float4 __ovld __cnfn native_log2(float4 x);
-float8 __ovld __cnfn native_log2(float8 x);
-float16 __ovld __cnfn native_log2(float16 x);
-
-/**
- * Compute a base 10 logarithm over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_log10(float x);
-float2 __ovld __cnfn native_log10(float2 x);
-float3 __ovld __cnfn native_log10(float3 x);
-float4 __ovld __cnfn native_log10(float4 x);
-float8 __ovld __cnfn native_log10(float8 x);
-float16 __ovld __cnfn native_log10(float16 x);
-
-/**
- * Compute x to the power y, where x is >= 0. The ranges
- * of x and y are implementation-defined. The maximum error
- * is implementation-defined.
- */
-float __ovld __cnfn native_powr(float x, float y);
-float2 __ovld __cnfn native_powr(float2 x, float2 y);
-float3 __ovld __cnfn native_powr(float3 x, float3 y);
-float4 __ovld __cnfn native_powr(float4 x, float4 y);
-float8 __ovld __cnfn native_powr(float8 x, float8 y);
-float16 __ovld __cnfn native_powr(float16 x, float16 y);
-
-/**
- * Compute reciprocal over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_recip(float x);
-float2 __ovld __cnfn native_recip(float2 x);
-float3 __ovld __cnfn native_recip(float3 x);
-float4 __ovld __cnfn native_recip(float4 x);
-float8 __ovld __cnfn native_recip(float8 x);
-float16 __ovld __cnfn native_recip(float16 x);
-
-/**
- * Compute inverse square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_rsqrt(float x);
-float2 __ovld __cnfn native_rsqrt(float2 x);
-float3 __ovld __cnfn native_rsqrt(float3 x);
-float4 __ovld __cnfn native_rsqrt(float4 x);
-float8 __ovld __cnfn native_rsqrt(float8 x);
-float16 __ovld __cnfn native_rsqrt(float16 x);
-
-/**
- * Compute sine over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sin(float x);
-float2 __ovld __cnfn native_sin(float2 x);
-float3 __ovld __cnfn native_sin(float3 x);
-float4 __ovld __cnfn native_sin(float4 x);
-float8 __ovld __cnfn native_sin(float8 x);
-float16 __ovld __cnfn native_sin(float16 x);
-
-/**
- * Compute square root over an implementation-defined
- * range. The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_sqrt(float x);
-float2 __ovld __cnfn native_sqrt(float2 x);
-float3 __ovld __cnfn native_sqrt(float3 x);
-float4 __ovld __cnfn native_sqrt(float4 x);
-float8 __ovld __cnfn native_sqrt(float8 x);
-float16 __ovld __cnfn native_sqrt(float16 x);
-
-/**
- * Compute tangent over an implementation-defined range.
- * The maximum error is implementation-defined.
- */
-float __ovld __cnfn native_tan(float x);
-float2 __ovld __cnfn native_tan(float2 x);
-float3 __ovld __cnfn native_tan(float3 x);
-float4 __ovld __cnfn native_tan(float4 x);
-float8 __ovld __cnfn native_tan(float8 x);
-float16 __ovld __cnfn native_tan(float16 x);
-
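-/*
- * Editor's note (sketch): the plain, half_ and native_ forms trade
- * accuracy for speed; per the comments above, the native_ forms have
- * implementation-defined range and maximum error. Hypothetical example:
- *
- *   float a = sqrt(x);        // full-precision builtin
- *   float b = half_sqrt(x);   // reduced-precision variant
- *   float c = native_sqrt(x); // fastest; accuracy implementation-defined
- */
-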
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
-
-/**
- * Returns | x |.
- */
-uchar __ovld __cnfn abs(char x);
-uchar __ovld __cnfn abs(uchar x);
-uchar2 __ovld __cnfn abs(char2 x);
-uchar2 __ovld __cnfn abs(uchar2 x);
-uchar3 __ovld __cnfn abs(char3 x);
-uchar3 __ovld __cnfn abs(uchar3 x);
-uchar4 __ovld __cnfn abs(char4 x);
-uchar4 __ovld __cnfn abs(uchar4 x);
-uchar8 __ovld __cnfn abs(char8 x);
-uchar8 __ovld __cnfn abs(uchar8 x);
-uchar16 __ovld __cnfn abs(char16 x);
-uchar16 __ovld __cnfn abs(uchar16 x);
-ushort __ovld __cnfn abs(short x);
-ushort __ovld __cnfn abs(ushort x);
-ushort2 __ovld __cnfn abs(short2 x);
-ushort2 __ovld __cnfn abs(ushort2 x);
-ushort3 __ovld __cnfn abs(short3 x);
-ushort3 __ovld __cnfn abs(ushort3 x);
-ushort4 __ovld __cnfn abs(short4 x);
-ushort4 __ovld __cnfn abs(ushort4 x);
-ushort8 __ovld __cnfn abs(short8 x);
-ushort8 __ovld __cnfn abs(ushort8 x);
-ushort16 __ovld __cnfn abs(short16 x);
-ushort16 __ovld __cnfn abs(ushort16 x);
-uint __ovld __cnfn abs(int x);
-uint __ovld __cnfn abs(uint x);
-uint2 __ovld __cnfn abs(int2 x);
-uint2 __ovld __cnfn abs(uint2 x);
-uint3 __ovld __cnfn abs(int3 x);
-uint3 __ovld __cnfn abs(uint3 x);
-uint4 __ovld __cnfn abs(int4 x);
-uint4 __ovld __cnfn abs(uint4 x);
-uint8 __ovld __cnfn abs(int8 x);
-uint8 __ovld __cnfn abs(uint8 x);
-uint16 __ovld __cnfn abs(int16 x);
-uint16 __ovld __cnfn abs(uint16 x);
-ulong __ovld __cnfn abs(long x);
-ulong __ovld __cnfn abs(ulong x);
-ulong2 __ovld __cnfn abs(long2 x);
-ulong2 __ovld __cnfn abs(ulong2 x);
-ulong3 __ovld __cnfn abs(long3 x);
-ulong3 __ovld __cnfn abs(ulong3 x);
-ulong4 __ovld __cnfn abs(long4 x);
-ulong4 __ovld __cnfn abs(ulong4 x);
-ulong8 __ovld __cnfn abs(long8 x);
-ulong8 __ovld __cnfn abs(ulong8 x);
-ulong16 __ovld __cnfn abs(long16 x);
-ulong16 __ovld __cnfn abs(ulong16 x);
-
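-/*
- * Worked example (editor's note): abs on a signed type returns the
- * corresponding unsigned type, so even the most negative value has a
- * representable absolute value:
- *
- *   uchar r = abs((char)-128); // r == 128
- */
-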
-/**
- * Returns | x - y | without modulo overflow.
- */
-uchar __ovld __cnfn abs_diff(char x, char y);
-uchar __ovld __cnfn abs_diff(uchar x, uchar y);
-uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
-uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
-uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
-uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
-uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
-uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
-uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
-uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
-uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
-uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
-ushort __ovld __cnfn abs_diff(short x, short y);
-ushort __ovld __cnfn abs_diff(ushort x, ushort y);
-ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
-ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
-ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
-ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
-ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
-ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
-ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
-ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
-ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
-ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
-uint __ovld __cnfn abs_diff(int x, int y);
-uint __ovld __cnfn abs_diff(uint x, uint y);
-uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
-uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
-uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
-uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
-uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
-uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
-uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
-uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
-uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
-uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
-ulong __ovld __cnfn abs_diff(long x, long y);
-ulong __ovld __cnfn abs_diff(ulong x, ulong y);
-ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
-ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
-ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
-ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
-ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
-ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
-ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
-ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
-ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
-ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
-
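-/*
- * Worked example (editor's note): abs_diff avoids the wraparound a plain
- * unsigned subtraction would produce:
- *
- *   uchar d = abs_diff((uchar)10, (uchar)250); // d == 240
- *   uchar w = (uchar)(10 - 250);               // wraps to 16
- */
-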
-/**
- * Returns x + y and saturates the result.
- */
-char __ovld __cnfn add_sat(char x, char y);
-uchar __ovld __cnfn add_sat(uchar x, uchar y);
-char2 __ovld __cnfn add_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn add_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn add_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn add_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn add_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn add_sat(short x, short y);
-ushort __ovld __cnfn add_sat(ushort x, ushort y);
-short2 __ovld __cnfn add_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn add_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn add_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn add_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn add_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn add_sat(int x, int y);
-uint __ovld __cnfn add_sat(uint x, uint y);
-int2 __ovld __cnfn add_sat(int2 x, int2 y);
-uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn add_sat(int3 x, int3 y);
-uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn add_sat(int4 x, int4 y);
-uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn add_sat(int8 x, int8 y);
-uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn add_sat(int16 x, int16 y);
-uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
-long __ovld __cnfn add_sat(long x, long y);
-ulong __ovld __cnfn add_sat(ulong x, ulong y);
-long2 __ovld __cnfn add_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn add_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn add_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn add_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn add_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
-
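-/*
- * Worked example (editor's note): the sum clamps to the type's range
- * instead of wrapping:
- *
- *   uchar u = add_sat((uchar)200, (uchar)100); // u == 255, not 44
- *   char  s = add_sat((char)100, (char)100);   // s == 127, not -56
- */
-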
-/**
- * Returns (x + y) >> 1. The intermediate sum does
- * not modulo overflow.
- */
-char __ovld __cnfn hadd(char x, char y);
-uchar __ovld __cnfn hadd(uchar x, uchar y);
-char2 __ovld __cnfn hadd(char2 x, char2 y);
-uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn hadd(char3 x, char3 y);
-uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn hadd(char4 x, char4 y);
-uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn hadd(char8 x, char8 y);
-uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn hadd(char16 x, char16 y);
-uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
-short __ovld __cnfn hadd(short x, short y);
-ushort __ovld __cnfn hadd(ushort x, ushort y);
-short2 __ovld __cnfn hadd(short2 x, short2 y);
-ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn hadd(short3 x, short3 y);
-ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn hadd(short4 x, short4 y);
-ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn hadd(short8 x, short8 y);
-ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn hadd(short16 x, short16 y);
-ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
-int __ovld __cnfn hadd(int x, int y);
-uint __ovld __cnfn hadd(uint x, uint y);
-int2 __ovld __cnfn hadd(int2 x, int2 y);
-uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
-int3 __ovld __cnfn hadd(int3 x, int3 y);
-uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
-int4 __ovld __cnfn hadd(int4 x, int4 y);
-uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
-int8 __ovld __cnfn hadd(int8 x, int8 y);
-uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
-int16 __ovld __cnfn hadd(int16 x, int16 y);
-uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
-long __ovld __cnfn hadd(long x, long y);
-ulong __ovld __cnfn hadd(ulong x, ulong y);
-long2 __ovld __cnfn hadd(long2 x, long2 y);
-ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn hadd(long3 x, long3 y);
-ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn hadd(long4 x, long4 y);
-ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn hadd(long8 x, long8 y);
-ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn hadd(long16 x, long16 y);
-ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
-
-/**
- * Returns (x + y + 1) >> 1. The intermediate sum
- * does not modulo overflow.
- */
-char __ovld __cnfn rhadd(char x, char y);
-uchar __ovld __cnfn rhadd(uchar x, uchar y);
-char2 __ovld __cnfn rhadd(char2 x, char2 y);
-uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn rhadd(char3 x, char3 y);
-uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn rhadd(char4 x, char4 y);
-uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn rhadd(char8 x, char8 y);
-uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn rhadd(char16 x, char16 y);
-uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
-short __ovld __cnfn rhadd(short x, short y);
-ushort __ovld __cnfn rhadd(ushort x, ushort y);
-short2 __ovld __cnfn rhadd(short2 x, short2 y);
-ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn rhadd(short3 x, short3 y);
-ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn rhadd(short4 x, short4 y);
-ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn rhadd(short8 x, short8 y);
-ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn rhadd(short16 x, short16 y);
-ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
-int __ovld __cnfn rhadd(int x, int y);
-uint __ovld __cnfn rhadd(uint x, uint y);
-int2 __ovld __cnfn rhadd(int2 x, int2 y);
-uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
-int3 __ovld __cnfn rhadd(int3 x, int3 y);
-uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
-int4 __ovld __cnfn rhadd(int4 x, int4 y);
-uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
-int8 __ovld __cnfn rhadd(int8 x, int8 y);
-uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
-int16 __ovld __cnfn rhadd(int16 x, int16 y);
-uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
-long __ovld __cnfn rhadd(long x, long y);
-ulong __ovld __cnfn rhadd(ulong x, ulong y);
-long2 __ovld __cnfn rhadd(long2 x, long2 y);
-ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn rhadd(long3 x, long3 y);
-ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn rhadd(long4 x, long4 y);
-ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn rhadd(long8 x, long8 y);
-ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn rhadd(long16 x, long16 y);
-ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
-
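-/*
- * Worked example (editor's note): hadd truncates and rhadd rounds up,
- * and neither drops the carry of the wider intermediate sum:
- *
- *   uchar h = hadd((uchar)254, (uchar)255);  // (509 >> 1) == 254
- *   uchar r = rhadd((uchar)254, (uchar)255); // (510 >> 1) == 255
- */
-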
-/**
- * Returns min(max(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
-char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
-char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
-char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
-char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
-char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
-short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
-short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
-short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
-short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
-short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
-int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
-int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
-int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
-int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
-int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
-long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
-long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
-long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
-long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
-long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
-char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
-char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
-char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
-char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
-short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
-short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
-short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
-short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
-int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
-int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
-int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
-int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
-long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
-long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
-long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
-long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
-
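-/*
- * Usage sketch (editor's note): the scalar-bound overloads above apply a
- * single [minval, maxval] range to every component of a vector x:
- *
- *   int4 v = (int4)(-5, 0, 7, 99);
- *   int4 c = clamp(v, 0, 10); // (0, 0, 7, 10)
- */
-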
-/**
- * Returns the number of leading 0-bits in x, starting
- * at the most significant bit position.
- */
-char __ovld __cnfn clz(char x);
-uchar __ovld __cnfn clz(uchar x);
-char2 __ovld __cnfn clz(char2 x);
-uchar2 __ovld __cnfn clz(uchar2 x);
-char3 __ovld __cnfn clz(char3 x);
-uchar3 __ovld __cnfn clz(uchar3 x);
-char4 __ovld __cnfn clz(char4 x);
-uchar4 __ovld __cnfn clz(uchar4 x);
-char8 __ovld __cnfn clz(char8 x);
-uchar8 __ovld __cnfn clz(uchar8 x);
-char16 __ovld __cnfn clz(char16 x);
-uchar16 __ovld __cnfn clz(uchar16 x);
-short __ovld __cnfn clz(short x);
-ushort __ovld __cnfn clz(ushort x);
-short2 __ovld __cnfn clz(short2 x);
-ushort2 __ovld __cnfn clz(ushort2 x);
-short3 __ovld __cnfn clz(short3 x);
-ushort3 __ovld __cnfn clz(ushort3 x);
-short4 __ovld __cnfn clz(short4 x);
-ushort4 __ovld __cnfn clz(ushort4 x);
-short8 __ovld __cnfn clz(short8 x);
-ushort8 __ovld __cnfn clz(ushort8 x);
-short16 __ovld __cnfn clz(short16 x);
-ushort16 __ovld __cnfn clz(ushort16 x);
-int __ovld __cnfn clz(int x);
-uint __ovld __cnfn clz(uint x);
-int2 __ovld __cnfn clz(int2 x);
-uint2 __ovld __cnfn clz(uint2 x);
-int3 __ovld __cnfn clz(int3 x);
-uint3 __ovld __cnfn clz(uint3 x);
-int4 __ovld __cnfn clz(int4 x);
-uint4 __ovld __cnfn clz(uint4 x);
-int8 __ovld __cnfn clz(int8 x);
-uint8 __ovld __cnfn clz(uint8 x);
-int16 __ovld __cnfn clz(int16 x);
-uint16 __ovld __cnfn clz(uint16 x);
-long __ovld __cnfn clz(long x);
-ulong __ovld __cnfn clz(ulong x);
-long2 __ovld __cnfn clz(long2 x);
-ulong2 __ovld __cnfn clz(ulong2 x);
-long3 __ovld __cnfn clz(long3 x);
-ulong3 __ovld __cnfn clz(ulong3 x);
-long4 __ovld __cnfn clz(long4 x);
-ulong4 __ovld __cnfn clz(ulong4 x);
-long8 __ovld __cnfn clz(long8 x);
-ulong8 __ovld __cnfn clz(ulong8 x);
-long16 __ovld __cnfn clz(long16 x);
-ulong16 __ovld __cnfn clz(ulong16 x);
-
-/**
- * Returns the count of trailing 0-bits in x. If x is 0,
- * returns the size in bits of the type of x (or of the
- * component type, if x is a vector).
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld ctz(char x);
-uchar __ovld ctz(uchar x);
-char2 __ovld ctz(char2 x);
-uchar2 __ovld ctz(uchar2 x);
-char3 __ovld ctz(char3 x);
-uchar3 __ovld ctz(uchar3 x);
-char4 __ovld ctz(char4 x);
-uchar4 __ovld ctz(uchar4 x);
-char8 __ovld ctz(char8 x);
-uchar8 __ovld ctz(uchar8 x);
-char16 __ovld ctz(char16 x);
-uchar16 __ovld ctz(uchar16 x);
-short __ovld ctz(short x);
-ushort __ovld ctz(ushort x);
-short2 __ovld ctz(short2 x);
-ushort2 __ovld ctz(ushort2 x);
-short3 __ovld ctz(short3 x);
-ushort3 __ovld ctz(ushort3 x);
-short4 __ovld ctz(short4 x);
-ushort4 __ovld ctz(ushort4 x);
-short8 __ovld ctz(short8 x);
-ushort8 __ovld ctz(ushort8 x);
-short16 __ovld ctz(short16 x);
-ushort16 __ovld ctz(ushort16 x);
-int __ovld ctz(int x);
-uint __ovld ctz(uint x);
-int2 __ovld ctz(int2 x);
-uint2 __ovld ctz(uint2 x);
-int3 __ovld ctz(int3 x);
-uint3 __ovld ctz(uint3 x);
-int4 __ovld ctz(int4 x);
-uint4 __ovld ctz(uint4 x);
-int8 __ovld ctz(int8 x);
-uint8 __ovld ctz(uint8 x);
-int16 __ovld ctz(int16 x);
-uint16 __ovld ctz(uint16 x);
-long __ovld ctz(long x);
-ulong __ovld ctz(ulong x);
-long2 __ovld ctz(long2 x);
-ulong2 __ovld ctz(ulong2 x);
-long3 __ovld ctz(long3 x);
-ulong3 __ovld ctz(ulong3 x);
-long4 __ovld ctz(long4 x);
-ulong4 __ovld ctz(ulong4 x);
-long8 __ovld ctz(long8 x);
-ulong8 __ovld ctz(ulong8 x);
-long16 __ovld ctz(long16 x);
-ulong16 __ovld ctz(ulong16 x);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
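-/*
- * Worked example (editor's note): edge cases of the bit counts above
- * (ctz requires OpenCL 2.0, per the guard above):
- *
- *   uchar lz = clz((uchar)1); // 7 leading zero bits in 00000001
- *   uchar tz = ctz((uchar)0); // 8, the width of uchar, per the rule above
- */
-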
-/**
- * Returns mul_hi(a, b) + c.
- */
-char __ovld __cnfn mad_hi(char a, char b, char c);
-uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_hi(short a, short b, short c);
-ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_hi(int a, int b, int c);
-uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_hi(long a, long b, long c);
-ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
-
-/**
- * Returns a * b + c and saturates the result.
- */
-char __ovld __cnfn mad_sat(char a, char b, char c);
-uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_sat(short a, short b, short c);
-ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_sat(int a, int b, int c);
-uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_sat(long a, long b, long c);
-ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
-
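-/*
- * Worked example (editor's note): mad_hi adds c to the high half of the
- * full product, while mad_sat saturates the full-precision a * b + c:
- *
- *   uint  h = mad_hi(0x80000000u, 4u, 1u);             // mul_hi == 2, h == 3
- *   uchar s = mad_sat((uchar)16, (uchar)16, (uchar)0); // 256 clamps to 255
- */
-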
-/**
- * Returns y if x < y, otherwise it returns x.
- */
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
-char2 __ovld __cnfn max(char2 x, char2 y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
-char3 __ovld __cnfn max(char3 x, char3 y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
-char4 __ovld __cnfn max(char4 x, char4 y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
-char8 __ovld __cnfn max(char8 x, char8 y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
-char16 __ovld __cnfn max(char16 x, char16 y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
-short2 __ovld __cnfn max(short2 x, short2 y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
-short3 __ovld __cnfn max(short3 x, short3 y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
-short4 __ovld __cnfn max(short4 x, short4 y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
-short8 __ovld __cnfn max(short8 x, short8 y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
-short16 __ovld __cnfn max(short16 x, short16 y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
-int2 __ovld __cnfn max(int2 x, int2 y);
-uint2 __ovld __cnfn max(uint2 x, uint2 y);
-int3 __ovld __cnfn max(int3 x, int3 y);
-uint3 __ovld __cnfn max(uint3 x, uint3 y);
-int4 __ovld __cnfn max(int4 x, int4 y);
-uint4 __ovld __cnfn max(uint4 x, uint4 y);
-int8 __ovld __cnfn max(int8 x, int8 y);
-uint8 __ovld __cnfn max(uint8 x, uint8 y);
-int16 __ovld __cnfn max(int16 x, int16 y);
-uint16 __ovld __cnfn max(uint16 x, uint16 y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
-long2 __ovld __cnfn max(long2 x, long2 y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
-long3 __ovld __cnfn max(long3 x, long3 y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
-long4 __ovld __cnfn max(long4 x, long4 y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
-long8 __ovld __cnfn max(long8 x, long8 y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
-long16 __ovld __cnfn max(long16 x, long16 y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char2 __ovld __cnfn max(char2 x, char y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar y);
-char3 __ovld __cnfn max(char3 x, char y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar y);
-char4 __ovld __cnfn max(char4 x, char y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar y);
-char8 __ovld __cnfn max(char8 x, char y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar y);
-char16 __ovld __cnfn max(char16 x, char y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short2 __ovld __cnfn max(short2 x, short y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort y);
-short3 __ovld __cnfn max(short3 x, short y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort y);
-short4 __ovld __cnfn max(short4 x, short y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort y);
-short8 __ovld __cnfn max(short8 x, short y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort y);
-short16 __ovld __cnfn max(short16 x, short y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int2 __ovld __cnfn max(int2 x, int y);
-uint2 __ovld __cnfn max(uint2 x, uint y);
-int3 __ovld __cnfn max(int3 x, int y);
-uint3 __ovld __cnfn max(uint3 x, uint y);
-int4 __ovld __cnfn max(int4 x, int y);
-uint4 __ovld __cnfn max(uint4 x, uint y);
-int8 __ovld __cnfn max(int8 x, int y);
-uint8 __ovld __cnfn max(uint8 x, uint y);
-int16 __ovld __cnfn max(int16 x, int y);
-uint16 __ovld __cnfn max(uint16 x, uint y);
-long2 __ovld __cnfn max(long2 x, long y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong y);
-long3 __ovld __cnfn max(long3 x, long y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong y);
-long4 __ovld __cnfn max(long4 x, long y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong y);
-long8 __ovld __cnfn max(long8 x, long y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong y);
-long16 __ovld __cnfn max(long16 x, long y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong y);
-
-/**
- * Returns y if y < x, otherwise it returns x.
- */
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
-char2 __ovld __cnfn min(char2 x, char2 y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
-char3 __ovld __cnfn min(char3 x, char3 y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
-char4 __ovld __cnfn min(char4 x, char4 y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
-char8 __ovld __cnfn min(char8 x, char8 y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
-char16 __ovld __cnfn min(char16 x, char16 y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
-short2 __ovld __cnfn min(short2 x, short2 y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
-short3 __ovld __cnfn min(short3 x, short3 y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
-short4 __ovld __cnfn min(short4 x, short4 y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
-short8 __ovld __cnfn min(short8 x, short8 y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
-short16 __ovld __cnfn min(short16 x, short16 y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
-int2 __ovld __cnfn min(int2 x, int2 y);
-uint2 __ovld __cnfn min(uint2 x, uint2 y);
-int3 __ovld __cnfn min(int3 x, int3 y);
-uint3 __ovld __cnfn min(uint3 x, uint3 y);
-int4 __ovld __cnfn min(int4 x, int4 y);
-uint4 __ovld __cnfn min(uint4 x, uint4 y);
-int8 __ovld __cnfn min(int8 x, int8 y);
-uint8 __ovld __cnfn min(uint8 x, uint8 y);
-int16 __ovld __cnfn min(int16 x, int16 y);
-uint16 __ovld __cnfn min(uint16 x, uint16 y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
-long2 __ovld __cnfn min(long2 x, long2 y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
-long3 __ovld __cnfn min(long3 x, long3 y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
-long4 __ovld __cnfn min(long4 x, long4 y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
-long8 __ovld __cnfn min(long8 x, long8 y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
-long16 __ovld __cnfn min(long16 x, long16 y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char2 __ovld __cnfn min(char2 x, char y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar y);
-char3 __ovld __cnfn min(char3 x, char y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar y);
-char4 __ovld __cnfn min(char4 x, char y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar y);
-char8 __ovld __cnfn min(char8 x, char y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar y);
-char16 __ovld __cnfn min(char16 x, char y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short2 __ovld __cnfn min(short2 x, short y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort y);
-short3 __ovld __cnfn min(short3 x, short y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort y);
-short4 __ovld __cnfn min(short4 x, short y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort y);
-short8 __ovld __cnfn min(short8 x, short y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort y);
-short16 __ovld __cnfn min(short16 x, short y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int2 __ovld __cnfn min(int2 x, int y);
-uint2 __ovld __cnfn min(uint2 x, uint y);
-int3 __ovld __cnfn min(int3 x, int y);
-uint3 __ovld __cnfn min(uint3 x, uint y);
-int4 __ovld __cnfn min(int4 x, int y);
-uint4 __ovld __cnfn min(uint4 x, uint y);
-int8 __ovld __cnfn min(int8 x, int y);
-uint8 __ovld __cnfn min(uint8 x, uint y);
-int16 __ovld __cnfn min(int16 x, int y);
-uint16 __ovld __cnfn min(uint16 x, uint y);
-long2 __ovld __cnfn min(long2 x, long y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong y);
-long3 __ovld __cnfn min(long3 x, long y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong y);
-long4 __ovld __cnfn min(long4 x, long y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong y);
-long8 __ovld __cnfn min(long8 x, long y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong y);
-long16 __ovld __cnfn min(long16 x, long y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong y);
-
-/**
- * Computes x * y and returns the high half of the product.
- */
-char __ovld __cnfn mul_hi(char x, char y);
-uchar __ovld __cnfn mul_hi(uchar x, uchar y);
-char2 __ovld __cnfn mul_hi(char2 x, char2 y);
-uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
-char3 __ovld __cnfn mul_hi(char3 x, char3 y);
-uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
-char4 __ovld __cnfn mul_hi(char4 x, char4 y);
-uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
-char8 __ovld __cnfn mul_hi(char8 x, char8 y);
-uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
-char16 __ovld __cnfn mul_hi(char16 x, char16 y);
-uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
-short __ovld __cnfn mul_hi(short x, short y);
-ushort __ovld __cnfn mul_hi(ushort x, ushort y);
-short2 __ovld __cnfn mul_hi(short2 x, short2 y);
-ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
-short3 __ovld __cnfn mul_hi(short3 x, short3 y);
-ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
-short4 __ovld __cnfn mul_hi(short4 x, short4 y);
-ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
-short8 __ovld __cnfn mul_hi(short8 x, short8 y);
-ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
-short16 __ovld __cnfn mul_hi(short16 x, short16 y);
-ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
-int __ovld __cnfn mul_hi(int x, int y);
-uint __ovld __cnfn mul_hi(uint x, uint y);
-int2 __ovld __cnfn mul_hi(int2 x, int2 y);
-uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
-int3 __ovld __cnfn mul_hi(int3 x, int3 y);
-uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
-int4 __ovld __cnfn mul_hi(int4 x, int4 y);
-uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
-int8 __ovld __cnfn mul_hi(int8 x, int8 y);
-uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
-int16 __ovld __cnfn mul_hi(int16 x, int16 y);
-uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
-long __ovld __cnfn mul_hi(long x, long y);
-ulong __ovld __cnfn mul_hi(ulong x, ulong y);
-long2 __ovld __cnfn mul_hi(long2 x, long2 y);
-ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
-long3 __ovld __cnfn mul_hi(long3 x, long3 y);
-ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
-long4 __ovld __cnfn mul_hi(long4 x, long4 y);
-ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
-long8 __ovld __cnfn mul_hi(long8 x, long8 y);
-ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
-long16 __ovld __cnfn mul_hi(long16 x, long16 y);
-ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
-
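-/*
- * Worked example (editor's note): for 32-bit operands the full product is
- * 64 bits wide and mul_hi returns its upper 32 bits:
- *
- *   uint hi = mul_hi(0xFFFFFFFFu, 2u); // product 0x1FFFFFFFE, hi == 1
- */
-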
-/**
- * For each element in v, the bits are shifted left by
- * the number of bits given by the corresponding
- * element in i (subject to usual shift modulo rules
- * described in section 6.3). Bits shifted off the left
- * side of the element are shifted back in from the
- * right.
- */
-char __ovld __cnfn rotate(char v, char i);
-uchar __ovld __cnfn rotate(uchar v, uchar i);
-char2 __ovld __cnfn rotate(char2 v, char2 i);
-uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
-char3 __ovld __cnfn rotate(char3 v, char3 i);
-uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
-char4 __ovld __cnfn rotate(char4 v, char4 i);
-uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
-char8 __ovld __cnfn rotate(char8 v, char8 i);
-uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
-char16 __ovld __cnfn rotate(char16 v, char16 i);
-uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
-short __ovld __cnfn rotate(short v, short i);
-ushort __ovld __cnfn rotate(ushort v, ushort i);
-short2 __ovld __cnfn rotate(short2 v, short2 i);
-ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
-short3 __ovld __cnfn rotate(short3 v, short3 i);
-ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
-short4 __ovld __cnfn rotate(short4 v, short4 i);
-ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
-short8 __ovld __cnfn rotate(short8 v, short8 i);
-ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
-short16 __ovld __cnfn rotate(short16 v, short16 i);
-ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
-int __ovld __cnfn rotate(int v, int i);
-uint __ovld __cnfn rotate(uint v, uint i);
-int2 __ovld __cnfn rotate(int2 v, int2 i);
-uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
-int3 __ovld __cnfn rotate(int3 v, int3 i);
-uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
-int4 __ovld __cnfn rotate(int4 v, int4 i);
-uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
-int8 __ovld __cnfn rotate(int8 v, int8 i);
-uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
-int16 __ovld __cnfn rotate(int16 v, int16 i);
-uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
-long __ovld __cnfn rotate(long v, long i);
-ulong __ovld __cnfn rotate(ulong v, ulong i);
-long2 __ovld __cnfn rotate(long2 v, long2 i);
-ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
-long3 __ovld __cnfn rotate(long3 v, long3 i);
-ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
-long4 __ovld __cnfn rotate(long4 v, long4 i);
-ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
-long8 __ovld __cnfn rotate(long8 v, long8 i);
-ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
-long16 __ovld __cnfn rotate(long16 v, long16 i);
-ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
-
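-/*
- * Illustrative sketch (not from the spec text above): rotate() is a
- * per-element bitwise left-rotate, so bits shifted out on the left
- * re-enter on the right. Assuming a kernel context:
- *
- *   uchar v = 0x81;                   // 1000 0001
- *   uchar r = rotate(v, (uchar)1);    // 0000 0011 -> 0x03
- */
-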
-/**
- * Returns x - y and saturates the result.
- */
-char __ovld __cnfn sub_sat(char x, char y);
-uchar __ovld __cnfn sub_sat(uchar x, uchar y);
-char2 __ovld __cnfn sub_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn sub_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn sub_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn sub_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn sub_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn sub_sat(short x, short y);
-ushort __ovld __cnfn sub_sat(ushort x, ushort y);
-short2 __ovld __cnfn sub_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn sub_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn sub_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn sub_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn sub_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn sub_sat(int x, int y);
-uint __ovld __cnfn sub_sat(uint x, uint y);
-int2 __ovld __cnfn sub_sat(int2 x, int2 y);
-uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn sub_sat(int3 x, int3 y);
-uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn sub_sat(int4 x, int4 y);
-uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn sub_sat(int8 x, int8 y);
-uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn sub_sat(int16 x, int16 y);
-uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
-long __ovld __cnfn sub_sat(long x, long y);
-ulong __ovld __cnfn sub_sat(ulong x, ulong y);
-long2 __ovld __cnfn sub_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn sub_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn sub_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn sub_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn sub_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
-
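-/*
- * Illustrative sketch (not from the spec text above): saturation
- * clamps the difference to the representable range instead of
- * wrapping:
- *
- *   uchar u = sub_sat((uchar)10, (uchar)20);    // 0, not 246
- *   char  s = sub_sat((char)-100, (char)100);   // -128 (CHAR_MIN)
- */
-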
-/**
- * result[i] = ((short)hi[i] << 8) | lo[i]
- * result[i] = ((ushort)hi[i] << 8) | lo[i]
- */
-short __ovld __cnfn upsample(char hi, uchar lo);
-ushort __ovld __cnfn upsample(uchar hi, uchar lo);
-short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
-short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
-short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
-short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
-short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
-ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
-ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
-ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
-ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
-ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
-
-/**
- * result[i] = ((int)hi[i] << 16) | lo[i]
- * result[i] = ((uint)hi[i] << 16) | lo[i]
- */
-int __ovld __cnfn upsample(short hi, ushort lo);
-uint __ovld __cnfn upsample(ushort hi, ushort lo);
-int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
-int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
-int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
-int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
-int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
-uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
-uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
-uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
-uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
-uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
-/**
- * result[i] = ((long)hi[i] << 32) | lo[i]
- * result[i] = ((ulong)hi[i] << 32) | lo[i]
- */
-long __ovld __cnfn upsample(int hi, uint lo);
-ulong __ovld __cnfn upsample(uint hi, uint lo);
-long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
-long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
-long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
-long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
-long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
-ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
-ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
-ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
-ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
-ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
-
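-/*
- * Illustrative sketch (not from the spec text above): upsample()
- * packs a hi/lo pair into the next wider type, per the formulas
- * above:
- *
- *   ushort w = upsample((uchar)0x12, (uchar)0x34);        // 0x1234
- *   uint   d = upsample((ushort)0xDEAD, (ushort)0xBEEF);  // 0xDEADBEEF
- */
-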
-/*
- * popcount(x): returns the number of set bits in x
- */
-char __ovld __cnfn popcount(char x);
-uchar __ovld __cnfn popcount(uchar x);
-char2 __ovld __cnfn popcount(char2 x);
-uchar2 __ovld __cnfn popcount(uchar2 x);
-char3 __ovld __cnfn popcount(char3 x);
-uchar3 __ovld __cnfn popcount(uchar3 x);
-char4 __ovld __cnfn popcount(char4 x);
-uchar4 __ovld __cnfn popcount(uchar4 x);
-char8 __ovld __cnfn popcount(char8 x);
-uchar8 __ovld __cnfn popcount(uchar8 x);
-char16 __ovld __cnfn popcount(char16 x);
-uchar16 __ovld __cnfn popcount(uchar16 x);
-short __ovld __cnfn popcount(short x);
-ushort __ovld __cnfn popcount(ushort x);
-short2 __ovld __cnfn popcount(short2 x);
-ushort2 __ovld __cnfn popcount(ushort2 x);
-short3 __ovld __cnfn popcount(short3 x);
-ushort3 __ovld __cnfn popcount(ushort3 x);
-short4 __ovld __cnfn popcount(short4 x);
-ushort4 __ovld __cnfn popcount(ushort4 x);
-short8 __ovld __cnfn popcount(short8 x);
-ushort8 __ovld __cnfn popcount(ushort8 x);
-short16 __ovld __cnfn popcount(short16 x);
-ushort16 __ovld __cnfn popcount(ushort16 x);
-int __ovld __cnfn popcount(int x);
-uint __ovld __cnfn popcount(uint x);
-int2 __ovld __cnfn popcount(int2 x);
-uint2 __ovld __cnfn popcount(uint2 x);
-int3 __ovld __cnfn popcount(int3 x);
-uint3 __ovld __cnfn popcount(uint3 x);
-int4 __ovld __cnfn popcount(int4 x);
-uint4 __ovld __cnfn popcount(uint4 x);
-int8 __ovld __cnfn popcount(int8 x);
-uint8 __ovld __cnfn popcount(uint8 x);
-int16 __ovld __cnfn popcount(int16 x);
-uint16 __ovld __cnfn popcount(uint16 x);
-long __ovld __cnfn popcount(long x);
-ulong __ovld __cnfn popcount(ulong x);
-long2 __ovld __cnfn popcount(long2 x);
-ulong2 __ovld __cnfn popcount(ulong2 x);
-long3 __ovld __cnfn popcount(long3 x);
-ulong3 __ovld __cnfn popcount(ulong3 x);
-long4 __ovld __cnfn popcount(long4 x);
-ulong4 __ovld __cnfn popcount(ulong4 x);
-long8 __ovld __cnfn popcount(long8 x);
-ulong8 __ovld __cnfn popcount(ulong8 x);
-long16 __ovld __cnfn popcount(long16 x);
-ulong16 __ovld __cnfn popcount(ulong16 x);
-
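-/*
- * Illustrative sketch (not from the spec text above): popcount()
- * counts the non-zero bits of each element:
- *
- *   uint n = popcount(0xF0F0u);         // 8
- *   int2 m = popcount((int2)(0, -1));   // (0, 32)
- */
-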
-/**
- * Multiply two 24-bit integer values x and y and add
- * the 32-bit integer result to the 32-bit integer z.
- * Refer to definition of mul24 to see how the 24-bit
- * integer multiplication is performed.
- */
-int __ovld __cnfn mad24(int x, int y, int z);
-uint __ovld __cnfn mad24(uint x, uint y, uint z);
-int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
-uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
-int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
-uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
-int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
-uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
-int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
-uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
-int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
-uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
-
-/**
- * Multiply two 24-bit integer values x and y. x and y
- * are 32-bit integers but only the low 24-bits are used
- * to perform the multiplication. mul24 should only
- * be used when values in x and y are in the range
- * [-2^23, 2^23-1] if x and y are signed integers and in the
- * range [0, 2^24-1] if x and y are unsigned integers. If
- * x and y are not in this range, the multiplication
- * result is implementation-defined.
- */
-int __ovld __cnfn mul24(int x, int y);
-uint __ovld __cnfn mul24(uint x, uint y);
-int2 __ovld __cnfn mul24(int2 x, int2 y);
-uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
-int3 __ovld __cnfn mul24(int3 x, int3 y);
-uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
-int4 __ovld __cnfn mul24(int4 x, int4 y);
-uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
-int8 __ovld __cnfn mul24(int8 x, int8 y);
-uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
-int16 __ovld __cnfn mul24(int16 x, int16 y);
-uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
-
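-/*
- * Illustrative sketch (not from the spec text above): mul24/mad24
- * trade range for speed, using only the low 24 bits of each operand:
- *
- *   int p = mul24(1000, 1000);     // 1000000 (operands fit in 24 bits)
- *   int q = mad24(1000, 1000, 5);  // mul24(1000, 1000) + 5 = 1000005
- */
-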
-// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
-
-/**
- * Returns fmin(fmax(x, minval), maxval).
- * Results are undefined if minval > maxval.
- */
-float __ovld __cnfn clamp(float x, float minval, float maxval);
-float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
-float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
-float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
-float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
-float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
-float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
-float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
-float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
-float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
-float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
-#ifdef cl_khr_fp64
-double __ovld __cnfn clamp(double x, double minval, double maxval);
-double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
-double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
-double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
-double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
-double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
-double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
-double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
-double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
-double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
-double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn clamp(half x, half minval, half maxval);
-half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
-half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
-half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
-half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
-half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
-half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
-half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
-half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
-half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
-half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
-#endif //cl_khr_fp16
-
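-/*
- * Illustrative sketch (not from the spec text above): the scalar
- * minval/maxval overloads apply the same bounds to every component:
- *
- *   float  s = clamp(1.5f, 0.0f, 1.0f);                  // 1.0f
- *   float4 v = clamp((float4)(-1.0f, 0.25f, 0.5f, 2.0f),
- *                    0.0f, 1.0f);                        // (0, 0.25, 0.5, 1)
- */
-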
-/**
- * Converts radians to degrees, i.e. (180 / PI) *
- * radians.
- */
-float __ovld __cnfn degrees(float radians);
-float2 __ovld __cnfn degrees(float2 radians);
-float3 __ovld __cnfn degrees(float3 radians);
-float4 __ovld __cnfn degrees(float4 radians);
-float8 __ovld __cnfn degrees(float8 radians);
-float16 __ovld __cnfn degrees(float16 radians);
-#ifdef cl_khr_fp64
-double __ovld __cnfn degrees(double radians);
-double2 __ovld __cnfn degrees(double2 radians);
-double3 __ovld __cnfn degrees(double3 radians);
-double4 __ovld __cnfn degrees(double4 radians);
-double8 __ovld __cnfn degrees(double8 radians);
-double16 __ovld __cnfn degrees(double16 radians);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn degrees(half radians);
-half2 __ovld __cnfn degrees(half2 radians);
-half3 __ovld __cnfn degrees(half3 radians);
-half4 __ovld __cnfn degrees(half4 radians);
-half8 __ovld __cnfn degrees(half8 radians);
-half16 __ovld __cnfn degrees(half16 radians);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if x < y, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn max(float x, float y);
-float2 __ovld __cnfn max(float2 x, float2 y);
-float3 __ovld __cnfn max(float3 x, float3 y);
-float4 __ovld __cnfn max(float4 x, float4 y);
-float8 __ovld __cnfn max(float8 x, float8 y);
-float16 __ovld __cnfn max(float16 x, float16 y);
-float2 __ovld __cnfn max(float2 x, float y);
-float3 __ovld __cnfn max(float3 x, float y);
-float4 __ovld __cnfn max(float4 x, float y);
-float8 __ovld __cnfn max(float8 x, float y);
-float16 __ovld __cnfn max(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn max(double x, double y);
-double2 __ovld __cnfn max(double2 x, double2 y);
-double3 __ovld __cnfn max(double3 x, double3 y);
-double4 __ovld __cnfn max(double4 x, double4 y);
-double8 __ovld __cnfn max(double8 x, double8 y);
-double16 __ovld __cnfn max(double16 x, double16 y);
-double2 __ovld __cnfn max(double2 x, double y);
-double3 __ovld __cnfn max(double3 x, double y);
-double4 __ovld __cnfn max(double4 x, double y);
-double8 __ovld __cnfn max(double8 x, double y);
-double16 __ovld __cnfn max(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn max(half x, half y);
-half2 __ovld __cnfn max(half2 x, half2 y);
-half3 __ovld __cnfn max(half3 x, half3 y);
-half4 __ovld __cnfn max(half4 x, half4 y);
-half8 __ovld __cnfn max(half8 x, half8 y);
-half16 __ovld __cnfn max(half16 x, half16 y);
-half2 __ovld __cnfn max(half2 x, half y);
-half3 __ovld __cnfn max(half3 x, half y);
-half4 __ovld __cnfn max(half4 x, half y);
-half8 __ovld __cnfn max(half8 x, half y);
-half16 __ovld __cnfn max(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns y if y < x, otherwise it returns x. If x and y
- * are infinite or NaN, the return values are undefined.
- */
-float __ovld __cnfn min(float x, float y);
-float2 __ovld __cnfn min(float2 x, float2 y);
-float3 __ovld __cnfn min(float3 x, float3 y);
-float4 __ovld __cnfn min(float4 x, float4 y);
-float8 __ovld __cnfn min(float8 x, float8 y);
-float16 __ovld __cnfn min(float16 x, float16 y);
-float2 __ovld __cnfn min(float2 x, float y);
-float3 __ovld __cnfn min(float3 x, float y);
-float4 __ovld __cnfn min(float4 x, float y);
-float8 __ovld __cnfn min(float8 x, float y);
-float16 __ovld __cnfn min(float16 x, float y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn min(double x, double y);
-double2 __ovld __cnfn min(double2 x, double2 y);
-double3 __ovld __cnfn min(double3 x, double3 y);
-double4 __ovld __cnfn min(double4 x, double4 y);
-double8 __ovld __cnfn min(double8 x, double8 y);
-double16 __ovld __cnfn min(double16 x, double16 y);
-double2 __ovld __cnfn min(double2 x, double y);
-double3 __ovld __cnfn min(double3 x, double y);
-double4 __ovld __cnfn min(double4 x, double y);
-double8 __ovld __cnfn min(double8 x, double y);
-double16 __ovld __cnfn min(double16 x, double y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn min(half x, half y);
-half2 __ovld __cnfn min(half2 x, half2 y);
-half3 __ovld __cnfn min(half3 x, half3 y);
-half4 __ovld __cnfn min(half4 x, half4 y);
-half8 __ovld __cnfn min(half8 x, half8 y);
-half16 __ovld __cnfn min(half16 x, half16 y);
-half2 __ovld __cnfn min(half2 x, half y);
-half3 __ovld __cnfn min(half3 x, half y);
-half4 __ovld __cnfn min(half4 x, half y);
-half8 __ovld __cnfn min(half8 x, half y);
-half16 __ovld __cnfn min(half16 x, half y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the linear blend of x and y, implemented as:
- * x + (y - x) * a
- * a must be a value in the range 0.0 ... 1.0. If a is not
- * in the range 0.0 ... 1.0, the return values are
- * undefined.
- */
-float __ovld __cnfn mix(float x, float y, float a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float a);
-#ifdef cl_khr_fp64
-double __ovld __cnfn mix(double x, double y, double a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double a);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn mix(half x, half y, half a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half a);
-#endif //cl_khr_fp16
-
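-/*
- * Illustrative sketch (not from the spec text above): mix() computes
- * x + (y - x) * a, i.e. a linear interpolation:
- *
- *   float m = mix(0.0f, 10.0f, 0.25f);   // 2.5f
- */
-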
-/**
- * Converts degrees to radians, i.e. (PI / 180) *
- * degrees.
- */
-float __ovld __cnfn radians(float degrees);
-float2 __ovld __cnfn radians(float2 degrees);
-float3 __ovld __cnfn radians(float3 degrees);
-float4 __ovld __cnfn radians(float4 degrees);
-float8 __ovld __cnfn radians(float8 degrees);
-float16 __ovld __cnfn radians(float16 degrees);
-#ifdef cl_khr_fp64
-double __ovld __cnfn radians(double degrees);
-double2 __ovld __cnfn radians(double2 degrees);
-double3 __ovld __cnfn radians(double3 degrees);
-double4 __ovld __cnfn radians(double4 degrees);
-double8 __ovld __cnfn radians(double8 degrees);
-double16 __ovld __cnfn radians(double16 degrees);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn radians(half degrees);
-half2 __ovld __cnfn radians(half2 degrees);
-half3 __ovld __cnfn radians(half3 degrees);
-half4 __ovld __cnfn radians(half4 degrees);
-half8 __ovld __cnfn radians(half8 degrees);
-half16 __ovld __cnfn radians(half16 degrees);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x < edge, otherwise it returns 1.0.
- */
-float __ovld __cnfn step(float edge, float x);
-float2 __ovld __cnfn step(float2 edge, float2 x);
-float3 __ovld __cnfn step(float3 edge, float3 x);
-float4 __ovld __cnfn step(float4 edge, float4 x);
-float8 __ovld __cnfn step(float8 edge, float8 x);
-float16 __ovld __cnfn step(float16 edge, float16 x);
-float2 __ovld __cnfn step(float edge, float2 x);
-float3 __ovld __cnfn step(float edge, float3 x);
-float4 __ovld __cnfn step(float edge, float4 x);
-float8 __ovld __cnfn step(float edge, float8 x);
-float16 __ovld __cnfn step(float edge, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn step(double edge, double x);
-double2 __ovld __cnfn step(double2 edge, double2 x);
-double3 __ovld __cnfn step(double3 edge, double3 x);
-double4 __ovld __cnfn step(double4 edge, double4 x);
-double8 __ovld __cnfn step(double8 edge, double8 x);
-double16 __ovld __cnfn step(double16 edge, double16 x);
-double2 __ovld __cnfn step(double edge, double2 x);
-double3 __ovld __cnfn step(double edge, double3 x);
-double4 __ovld __cnfn step(double edge, double4 x);
-double8 __ovld __cnfn step(double edge, double8 x);
-double16 __ovld __cnfn step(double edge, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn step(half edge, half x);
-half2 __ovld __cnfn step(half2 edge, half2 x);
-half3 __ovld __cnfn step(half3 edge, half3 x);
-half4 __ovld __cnfn step(half4 edge, half4 x);
-half8 __ovld __cnfn step(half8 edge, half8 x);
-half16 __ovld __cnfn step(half16 edge, half16 x);
-half2 __ovld __cnfn step(half edge, half2 x);
-half3 __ovld __cnfn step(half edge, half3 x);
-half4 __ovld __cnfn step(half edge, half4 x);
-half8 __ovld __cnfn step(half edge, half8 x);
-half16 __ovld __cnfn step(half edge, half16 x);
-#endif //cl_khr_fp16
-
-/**
- * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
- * performs smooth Hermite interpolation between 0
- * and 1 when edge0 < x < edge1. This is useful in
- * cases where you would want a threshold function
- * with a smooth transition.
- * This is equivalent to:
- * gentype t;
- * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
- * return t * t * (3 - 2 * t);
- * Results are undefined if edge0 >= edge1 or if x,
- * edge0 or edge1 is a NaN.
- */
-float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
-float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
-float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
-double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
-double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
-half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
-#endif //cl_khr_fp16
-
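-/*
- * Illustrative sketch (not from the spec text above): step() is a
- * hard threshold, smoothstep() a cubic Hermite ramp between the
- * edges:
- *
- *   float a = step(0.5f, 0.3f);              // 0.0f (x < edge)
- *   float b = step(0.5f, 0.7f);              // 1.0f
- *   float c = smoothstep(0.0f, 1.0f, 0.5f);  // 0.5f (t*t*(3-2*t))
- */
-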
-/**
- * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
- * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
- */
-float __ovld __cnfn sign(float x);
-float2 __ovld __cnfn sign(float2 x);
-float3 __ovld __cnfn sign(float3 x);
-float4 __ovld __cnfn sign(float4 x);
-float8 __ovld __cnfn sign(float8 x);
-float16 __ovld __cnfn sign(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn sign(double x);
-double2 __ovld __cnfn sign(double2 x);
-double3 __ovld __cnfn sign(double3 x);
-double4 __ovld __cnfn sign(double4 x);
-double8 __ovld __cnfn sign(double8 x);
-double16 __ovld __cnfn sign(double16 x);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn sign(half x);
-half2 __ovld __cnfn sign(half2 x);
-half3 __ovld __cnfn sign(half3 x);
-half4 __ovld __cnfn sign(half4 x);
-half8 __ovld __cnfn sign(half8 x);
-half16 __ovld __cnfn sign(half16 x);
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
-
-/**
- * Returns the cross product of p0.xyz and p1.xyz. The
- * w component of float4 result returned will be 0.0.
- */
-float4 __ovld __cnfn cross(float4 p0, float4 p1);
-float3 __ovld __cnfn cross(float3 p0, float3 p1);
-#ifdef cl_khr_fp64
-double4 __ovld __cnfn cross(double4 p0, double4 p1);
-double3 __ovld __cnfn cross(double3 p0, double3 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half4 __ovld __cnfn cross(half4 p0, half4 p1);
-half3 __ovld __cnfn cross(half3 p0, half3 p1);
-#endif //cl_khr_fp16
-
-/**
- * Compute dot product.
- */
-float __ovld __cnfn dot(float p0, float p1);
-float __ovld __cnfn dot(float2 p0, float2 p1);
-float __ovld __cnfn dot(float3 p0, float3 p1);
-float __ovld __cnfn dot(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn dot(double p0, double p1);
-double __ovld __cnfn dot(double2 p0, double2 p1);
-double __ovld __cnfn dot(double3 p0, double3 p1);
-double __ovld __cnfn dot(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn dot(half p0, half p1);
-half __ovld __cnfn dot(half2 p0, half2 p1);
-half __ovld __cnfn dot(half3 p0, half3 p1);
-half __ovld __cnfn dot(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the distance between p0 and p1. This is
- * calculated as length(p0 - p1).
- */
-float __ovld __cnfn distance(float p0, float p1);
-float __ovld __cnfn distance(float2 p0, float2 p1);
-float __ovld __cnfn distance(float3 p0, float3 p1);
-float __ovld __cnfn distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp64
-double __ovld __cnfn distance(double p0, double p1);
-double __ovld __cnfn distance(double2 p0, double2 p1);
-double __ovld __cnfn distance(double3 p0, double3 p1);
-double __ovld __cnfn distance(double4 p0, double4 p1);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn distance(half p0, half p1);
-half __ovld __cnfn distance(half2 p0, half2 p1);
-half __ovld __cnfn distance(half3 p0, half3 p1);
-half __ovld __cnfn distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p, i.e.,
- * sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn length(float p);
-float __ovld __cnfn length(float2 p);
-float __ovld __cnfn length(float3 p);
-float __ovld __cnfn length(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn length(double p);
-double __ovld __cnfn length(double2 p);
-double __ovld __cnfn length(double3 p);
-double __ovld __cnfn length(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn length(half p);
-half __ovld __cnfn length(half2 p);
-half __ovld __cnfn length(half3 p);
-half __ovld __cnfn length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1.
- */
-float __ovld __cnfn normalize(float p);
-float2 __ovld __cnfn normalize(float2 p);
-float3 __ovld __cnfn normalize(float3 p);
-float4 __ovld __cnfn normalize(float4 p);
-#ifdef cl_khr_fp64
-double __ovld __cnfn normalize(double p);
-double2 __ovld __cnfn normalize(double2 p);
-double3 __ovld __cnfn normalize(double3 p);
-double4 __ovld __cnfn normalize(double4 p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn normalize(half p);
-half2 __ovld __cnfn normalize(half2 p);
-half3 __ovld __cnfn normalize(half3 p);
-half4 __ovld __cnfn normalize(half4 p);
-#endif //cl_khr_fp16
-
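-/*
- * Illustrative sketch (not from the spec text above): the geometric
- * helpers compose as expected for a 3-4-5 triangle:
- *
- *   float3 p = (float3)(3.0f, 4.0f, 0.0f);
- *   float  l = length(p);       // 5.0f, i.e. sqrt(dot(p, p))
- *   float3 n = normalize(p);    // (0.6f, 0.8f, 0.0f)
- */
-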
-/**
- * Returns fast_length(p0 - p1).
- */
-float __ovld __cnfn fast_distance(float p0, float p1);
-float __ovld __cnfn fast_distance(float2 p0, float2 p1);
-float __ovld __cnfn fast_distance(float3 p0, float3 p1);
-float __ovld __cnfn fast_distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_distance(half p0, half p1);
-half __ovld __cnfn fast_distance(half2 p0, half2 p1);
-half __ovld __cnfn fast_distance(half3 p0, half3 p1);
-half __ovld __cnfn fast_distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
-
-/**
- * Returns the length of vector p computed as:
- * half_sqrt(p.x^2 + p.y^2 + ...)
- */
-float __ovld __cnfn fast_length(float p);
-float __ovld __cnfn fast_length(float2 p);
-float __ovld __cnfn fast_length(float3 p);
-float __ovld __cnfn fast_length(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_length(half p);
-half __ovld __cnfn fast_length(half2 p);
-half __ovld __cnfn fast_length(half3 p);
-half __ovld __cnfn fast_length(half4 p);
-#endif //cl_khr_fp16
-
-/**
- * Returns a vector in the same direction as p but with a
- * length of 1. fast_normalize is computed as:
- * p * half_rsqrt (p.x^2 + p.y^2 + ... )
- * The result shall be within 8192 ulps error from the
- * infinitely precise result of
- * if (all(p == 0.0f))
- * result = p;
- * else
- * result = p / sqrt (p.x^2 + p.y^2 + ...);
- * with the following exceptions:
- * 1) If the sum of squares is greater than FLT_MAX
- * then the values of the floating-point elements in the
- * result vector are undefined.
- * 2) If the sum of squares is less than FLT_MIN then
- * the implementation may return p.
- * 3) If the device is in "denorms are flushed to zero"
- * mode, individual operand elements with magnitude
- * less than sqrt(FLT_MIN) may be flushed to zero
- * before proceeding with the calculation.
- */
-float __ovld __cnfn fast_normalize(float p);
-float2 __ovld __cnfn fast_normalize(float2 p);
-float3 __ovld __cnfn fast_normalize(float3 p);
-float4 __ovld __cnfn fast_normalize(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_normalize(half p);
-half2 __ovld __cnfn fast_normalize(half2 p);
-half3 __ovld __cnfn fast_normalize(half3 p);
-half4 __ovld __cnfn fast_normalize(half4 p);
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
-
-/**
- * intn isequal (floatn x, floatn y)
- * Returns the component-wise compare of x == y.
- */
-int __ovld __cnfn isequal(float x, float y);
-int2 __ovld __cnfn isequal(float2 x, float2 y);
-int3 __ovld __cnfn isequal(float3 x, float3 y);
-int4 __ovld __cnfn isequal(float4 x, float4 y);
-int8 __ovld __cnfn isequal(float8 x, float8 y);
-int16 __ovld __cnfn isequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isequal(double x, double y);
-long2 __ovld __cnfn isequal(double2 x, double2 y);
-long3 __ovld __cnfn isequal(double3 x, double3 y);
-long4 __ovld __cnfn isequal(double4 x, double4 y);
-long8 __ovld __cnfn isequal(double8 x, double8 y);
-long16 __ovld __cnfn isequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isequal(half x, half y);
-short2 __ovld __cnfn isequal(half2 x, half2 y);
-short3 __ovld __cnfn isequal(half3 x, half3 y);
-short4 __ovld __cnfn isequal(half4 x, half4 y);
-short8 __ovld __cnfn isequal(half8 x, half8 y);
-short16 __ovld __cnfn isequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
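-/*
- * Illustrative sketch (not from the spec text above): the scalar
- * relational forms return 0 or 1, while the vector forms return 0 or
- * -1 (all bits set) per component, ready for use with select() or
- * bitselect():
- *
- *   int  s = isequal(1.0f, 1.0f);             // 1
- *   int2 v = isequal((float2)(1.0f, 2.0f),
- *                    (float2)(1.0f, 3.0f));   // (-1, 0)
- */
-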
-/**
- * Returns the component-wise compare of x != y.
- */
-int __ovld __cnfn isnotequal(float x, float y);
-int2 __ovld __cnfn isnotequal(float2 x, float2 y);
-int3 __ovld __cnfn isnotequal(float3 x, float3 y);
-int4 __ovld __cnfn isnotequal(float4 x, float4 y);
-int8 __ovld __cnfn isnotequal(float8 x, float8 y);
-int16 __ovld __cnfn isnotequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnotequal(double x, double y);
-long2 __ovld __cnfn isnotequal(double2 x, double2 y);
-long3 __ovld __cnfn isnotequal(double3 x, double3 y);
-long4 __ovld __cnfn isnotequal(double4 x, double4 y);
-long8 __ovld __cnfn isnotequal(double8 x, double8 y);
-long16 __ovld __cnfn isnotequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnotequal(half x, half y);
-short2 __ovld __cnfn isnotequal(half2 x, half2 y);
-short3 __ovld __cnfn isnotequal(half3 x, half3 y);
-short4 __ovld __cnfn isnotequal(half4 x, half4 y);
-short8 __ovld __cnfn isnotequal(half8 x, half8 y);
-short16 __ovld __cnfn isnotequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x > y.
- */
-int __ovld __cnfn isgreater(float x, float y);
-int2 __ovld __cnfn isgreater(float2 x, float2 y);
-int3 __ovld __cnfn isgreater(float3 x, float3 y);
-int4 __ovld __cnfn isgreater(float4 x, float4 y);
-int8 __ovld __cnfn isgreater(float8 x, float8 y);
-int16 __ovld __cnfn isgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreater(double x, double y);
-long2 __ovld __cnfn isgreater(double2 x, double2 y);
-long3 __ovld __cnfn isgreater(double3 x, double3 y);
-long4 __ovld __cnfn isgreater(double4 x, double4 y);
-long8 __ovld __cnfn isgreater(double8 x, double8 y);
-long16 __ovld __cnfn isgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreater(half x, half y);
-short2 __ovld __cnfn isgreater(half2 x, half2 y);
-short3 __ovld __cnfn isgreater(half3 x, half3 y);
-short4 __ovld __cnfn isgreater(half4 x, half4 y);
-short8 __ovld __cnfn isgreater(half8 x, half8 y);
-short16 __ovld __cnfn isgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x >= y.
- */
-int __ovld __cnfn isgreaterequal(float x, float y);
-int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
-int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
-int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
-int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
-int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isgreaterequal(double x, double y);
-long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
-long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
-long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
-long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
-long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isgreaterequal(half x, half y);
-short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
-short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
-short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
-short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
-short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x < y.
- */
-int __ovld __cnfn isless(float x, float y);
-int2 __ovld __cnfn isless(float2 x, float2 y);
-int3 __ovld __cnfn isless(float3 x, float3 y);
-int4 __ovld __cnfn isless(float4 x, float4 y);
-int8 __ovld __cnfn isless(float8 x, float8 y);
-int16 __ovld __cnfn isless(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isless(double x, double y);
-long2 __ovld __cnfn isless(double2 x, double2 y);
-long3 __ovld __cnfn isless(double3 x, double3 y);
-long4 __ovld __cnfn isless(double4 x, double4 y);
-long8 __ovld __cnfn isless(double8 x, double8 y);
-long16 __ovld __cnfn isless(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isless(half x, half y);
-short2 __ovld __cnfn isless(half2 x, half2 y);
-short3 __ovld __cnfn isless(half3 x, half3 y);
-short4 __ovld __cnfn isless(half4 x, half4 y);
-short8 __ovld __cnfn isless(half8 x, half8 y);
-short16 __ovld __cnfn isless(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of x <= y.
- */
-int __ovld __cnfn islessequal(float x, float y);
-int2 __ovld __cnfn islessequal(float2 x, float2 y);
-int3 __ovld __cnfn islessequal(float3 x, float3 y);
-int4 __ovld __cnfn islessequal(float4 x, float4 y);
-int8 __ovld __cnfn islessequal(float8 x, float8 y);
-int16 __ovld __cnfn islessequal(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessequal(double x, double y);
-long2 __ovld __cnfn islessequal(double2 x, double2 y);
-long3 __ovld __cnfn islessequal(double3 x, double3 y);
-long4 __ovld __cnfn islessequal(double4 x, double4 y);
-long8 __ovld __cnfn islessequal(double8 x, double8 y);
-long16 __ovld __cnfn islessequal(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessequal(half x, half y);
-short2 __ovld __cnfn islessequal(half2 x, half2 y);
-short3 __ovld __cnfn islessequal(half3 x, half3 y);
-short4 __ovld __cnfn islessequal(half4 x, half4 y);
-short8 __ovld __cnfn islessequal(half8 x, half8 y);
-short16 __ovld __cnfn islessequal(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Returns the component-wise compare of
- * (x < y) || (x > y) .
- */
-int __ovld __cnfn islessgreater(float x, float y);
-int2 __ovld __cnfn islessgreater(float2 x, float2 y);
-int3 __ovld __cnfn islessgreater(float3 x, float3 y);
-int4 __ovld __cnfn islessgreater(float4 x, float4 y);
-int8 __ovld __cnfn islessgreater(float8 x, float8 y);
-int16 __ovld __cnfn islessgreater(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn islessgreater(double x, double y);
-long2 __ovld __cnfn islessgreater(double2 x, double2 y);
-long3 __ovld __cnfn islessgreater(double3 x, double3 y);
-long4 __ovld __cnfn islessgreater(double4 x, double4 y);
-long8 __ovld __cnfn islessgreater(double8 x, double8 y);
-long16 __ovld __cnfn islessgreater(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn islessgreater(half x, half y);
-short2 __ovld __cnfn islessgreater(half2 x, half2 y);
-short3 __ovld __cnfn islessgreater(half3 x, half3 y);
-short4 __ovld __cnfn islessgreater(half4 x, half4 y);
-short8 __ovld __cnfn islessgreater(half8 x, half8 y);
-short16 __ovld __cnfn islessgreater(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test for finite value.
- */
-int __ovld __cnfn isfinite(float);
-int2 __ovld __cnfn isfinite(float2);
-int3 __ovld __cnfn isfinite(float3);
-int4 __ovld __cnfn isfinite(float4);
-int8 __ovld __cnfn isfinite(float8);
-int16 __ovld __cnfn isfinite(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isfinite(double);
-long2 __ovld __cnfn isfinite(double2);
-long3 __ovld __cnfn isfinite(double3);
-long4 __ovld __cnfn isfinite(double4);
-long8 __ovld __cnfn isfinite(double8);
-long16 __ovld __cnfn isfinite(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isfinite(half);
-short2 __ovld __cnfn isfinite(half2);
-short3 __ovld __cnfn isfinite(half3);
-short4 __ovld __cnfn isfinite(half4);
-short8 __ovld __cnfn isfinite(half8);
-short16 __ovld __cnfn isfinite(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for infinity value (positive or negative).
- */
-int __ovld __cnfn isinf(float);
-int2 __ovld __cnfn isinf(float2);
-int3 __ovld __cnfn isinf(float3);
-int4 __ovld __cnfn isinf(float4);
-int8 __ovld __cnfn isinf(float8);
-int16 __ovld __cnfn isinf(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isinf(double);
-long2 __ovld __cnfn isinf(double2);
-long3 __ovld __cnfn isinf(double3);
-long4 __ovld __cnfn isinf(double4);
-long8 __ovld __cnfn isinf(double8);
-long16 __ovld __cnfn isinf(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isinf(half);
-short2 __ovld __cnfn isinf(half2);
-short3 __ovld __cnfn isinf(half3);
-short4 __ovld __cnfn isinf(half4);
-short8 __ovld __cnfn isinf(half8);
-short16 __ovld __cnfn isinf(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a NaN.
- */
-int __ovld __cnfn isnan(float);
-int2 __ovld __cnfn isnan(float2);
-int3 __ovld __cnfn isnan(float3);
-int4 __ovld __cnfn isnan(float4);
-int8 __ovld __cnfn isnan(float8);
-int16 __ovld __cnfn isnan(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnan(double);
-long2 __ovld __cnfn isnan(double2);
-long3 __ovld __cnfn isnan(double3);
-long4 __ovld __cnfn isnan(double4);
-long8 __ovld __cnfn isnan(double8);
-long16 __ovld __cnfn isnan(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnan(half);
-short2 __ovld __cnfn isnan(half2);
-short3 __ovld __cnfn isnan(half3);
-short4 __ovld __cnfn isnan(half4);
-short8 __ovld __cnfn isnan(half8);
-short16 __ovld __cnfn isnan(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test for a normal value.
- */
-int __ovld __cnfn isnormal(float);
-int2 __ovld __cnfn isnormal(float2);
-int3 __ovld __cnfn isnormal(float3);
-int4 __ovld __cnfn isnormal(float4);
-int8 __ovld __cnfn isnormal(float8);
-int16 __ovld __cnfn isnormal(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isnormal(double);
-long2 __ovld __cnfn isnormal(double2);
-long3 __ovld __cnfn isnormal(double3);
-long4 __ovld __cnfn isnormal(double4);
-long8 __ovld __cnfn isnormal(double8);
-long16 __ovld __cnfn isnormal(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isnormal(half);
-short2 __ovld __cnfn isnormal(half2);
-short3 __ovld __cnfn isnormal(half3);
-short4 __ovld __cnfn isnormal(half4);
-short8 __ovld __cnfn isnormal(half8);
-short16 __ovld __cnfn isnormal(half16);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are ordered. isordered() takes
- * arguments x and y, and returns the result
- * isequal(x, x) && isequal(y, y).
- */
-int __ovld __cnfn isordered(float x, float y);
-int2 __ovld __cnfn isordered(float2 x, float2 y);
-int3 __ovld __cnfn isordered(float3 x, float3 y);
-int4 __ovld __cnfn isordered(float4 x, float4 y);
-int8 __ovld __cnfn isordered(float8 x, float8 y);
-int16 __ovld __cnfn isordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isordered(double x, double y);
-long2 __ovld __cnfn isordered(double2 x, double2 y);
-long3 __ovld __cnfn isordered(double3 x, double3 y);
-long4 __ovld __cnfn isordered(double4 x, double4 y);
-long8 __ovld __cnfn isordered(double8 x, double8 y);
-long16 __ovld __cnfn isordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isordered(half x, half y);
-short2 __ovld __cnfn isordered(half2 x, half2 y);
-short3 __ovld __cnfn isordered(half3 x, half3 y);
-short4 __ovld __cnfn isordered(half4 x, half4 y);
-short8 __ovld __cnfn isordered(half8 x, half8 y);
-short16 __ovld __cnfn isordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test if arguments are unordered. isunordered()
- * takes arguments x and y, returning non-zero if x or y
- * is NaN, and zero otherwise.
- */
-int __ovld __cnfn isunordered(float x, float y);
-int2 __ovld __cnfn isunordered(float2 x, float2 y);
-int3 __ovld __cnfn isunordered(float3 x, float3 y);
-int4 __ovld __cnfn isunordered(float4 x, float4 y);
-int8 __ovld __cnfn isunordered(float8 x, float8 y);
-int16 __ovld __cnfn isunordered(float16 x, float16 y);
-#ifdef cl_khr_fp64
-int __ovld __cnfn isunordered(double x, double y);
-long2 __ovld __cnfn isunordered(double2 x, double2 y);
-long3 __ovld __cnfn isunordered(double3 x, double3 y);
-long4 __ovld __cnfn isunordered(double4 x, double4 y);
-long8 __ovld __cnfn isunordered(double8 x, double8 y);
-long16 __ovld __cnfn isunordered(double16 x, double16 y);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn isunordered(half x, half y);
-short2 __ovld __cnfn isunordered(half2 x, half2 y);
-short3 __ovld __cnfn isunordered(half3 x, half3 y);
-short4 __ovld __cnfn isunordered(half4 x, half4 y);
-short8 __ovld __cnfn isunordered(half8 x, half8 y);
-short16 __ovld __cnfn isunordered(half16 x, half16 y);
-#endif //cl_khr_fp16
-
-/**
- * Test for sign bit. The scalar version of the function
- * returns 1 if the sign bit in the float is set, else 0.
- * The vector version of the function returns, for each
- * component in floatn, -1 if the sign bit in the float is
- * set, else 0.
- */
-int __ovld __cnfn signbit(float);
-int2 __ovld __cnfn signbit(float2);
-int3 __ovld __cnfn signbit(float3);
-int4 __ovld __cnfn signbit(float4);
-int8 __ovld __cnfn signbit(float8);
-int16 __ovld __cnfn signbit(float16);
-#ifdef cl_khr_fp64
-int __ovld __cnfn signbit(double);
-long2 __ovld __cnfn signbit(double2);
-long3 __ovld __cnfn signbit(double3);
-long4 __ovld __cnfn signbit(double4);
-long8 __ovld __cnfn signbit(double8);
-long16 __ovld __cnfn signbit(double16);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-int __ovld __cnfn signbit(half);
-short2 __ovld __cnfn signbit(half2);
-short3 __ovld __cnfn signbit(half3);
-short4 __ovld __cnfn signbit(half4);
-short8 __ovld __cnfn signbit(half8);
-short16 __ovld __cnfn signbit(half16);
-#endif //cl_khr_fp16
-
-/**
- * Returns 1 if the most significant bit in any component
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn any(char x);
-int __ovld __cnfn any(char2 x);
-int __ovld __cnfn any(char3 x);
-int __ovld __cnfn any(char4 x);
-int __ovld __cnfn any(char8 x);
-int __ovld __cnfn any(char16 x);
-int __ovld __cnfn any(short x);
-int __ovld __cnfn any(short2 x);
-int __ovld __cnfn any(short3 x);
-int __ovld __cnfn any(short4 x);
-int __ovld __cnfn any(short8 x);
-int __ovld __cnfn any(short16 x);
-int __ovld __cnfn any(int x);
-int __ovld __cnfn any(int2 x);
-int __ovld __cnfn any(int3 x);
-int __ovld __cnfn any(int4 x);
-int __ovld __cnfn any(int8 x);
-int __ovld __cnfn any(int16 x);
-int __ovld __cnfn any(long x);
-int __ovld __cnfn any(long2 x);
-int __ovld __cnfn any(long3 x);
-int __ovld __cnfn any(long4 x);
-int __ovld __cnfn any(long8 x);
-int __ovld __cnfn any(long16 x);
-
-/**
- * Returns 1 if the most significant bit in all components
- * of x is set; otherwise returns 0.
- */
-int __ovld __cnfn all(char x);
-int __ovld __cnfn all(char2 x);
-int __ovld __cnfn all(char3 x);
-int __ovld __cnfn all(char4 x);
-int __ovld __cnfn all(char8 x);
-int __ovld __cnfn all(char16 x);
-int __ovld __cnfn all(short x);
-int __ovld __cnfn all(short2 x);
-int __ovld __cnfn all(short3 x);
-int __ovld __cnfn all(short4 x);
-int __ovld __cnfn all(short8 x);
-int __ovld __cnfn all(short16 x);
-int __ovld __cnfn all(int x);
-int __ovld __cnfn all(int2 x);
-int __ovld __cnfn all(int3 x);
-int __ovld __cnfn all(int4 x);
-int __ovld __cnfn all(int8 x);
-int __ovld __cnfn all(int16 x);
-int __ovld __cnfn all(long x);
-int __ovld __cnfn all(long2 x);
-int __ovld __cnfn all(long3 x);
-int __ovld __cnfn all(long4 x);
-int __ovld __cnfn all(long8 x);
-int __ovld __cnfn all(long16 x);
-
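-/*
- * Illustrative sketch (not from the spec text above): any()/all()
- * test the sign (most significant) bit of each component, matching
- * the -1/0 results of the vector relational functions:
- *
- *   int a = any((int2)(0, -1));    // 1: one MSB set
- *   int b = all((int2)(-1, 1));    // 0: MSB of 1 is clear
- */
-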
-/**
- * Each bit of the result is the corresponding bit of a if
- * the corresponding bit of c is 0. Otherwise it is the
- * corresponding bit of b.
- */
-char __ovld __cnfn bitselect(char a, char b, char c);
-uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn bitselect(short a, short b, short c);
-ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn bitselect(int a, int b, int c);
-uint __ovld __cnfn bitselect(uint a, uint b, uint c);
-int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn bitselect(long a, long b, long c);
-ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
-float __ovld __cnfn bitselect(float a, float b, float c);
-float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
-#ifdef cl_khr_fp64
-double __ovld __cnfn bitselect(double a, double b, double c);
-double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn bitselect(half a, half b, half c);
-half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
-#endif //cl_khr_fp16
-
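-/*
- * Illustrative sketch (not from the spec text above): bitselect(a, b, c)
- * computes (a & ~c) | (b & c), a bitwise merge under mask c:
- *
- *   uchar r = bitselect((uchar)0xAA, (uchar)0x55, (uchar)0x0F);
- *   // (0xAA & 0xF0) | (0x55 & 0x0F) = 0xA5
- */
-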
-/**
- * For each component of a vector type,
- * result[i] = (MSB of c[i] is set) ? b[i] : a[i].
- * For a scalar type, result = c ? b : a.
- * b and a must have the same type.
- * c must have the same number of elements and bits as a.
- */
-char __ovld __cnfn select(char a, char b, char c);
-uchar __ovld __cnfn select(uchar a, uchar b, char c);
-char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
-
-short __ovld __cnfn select(short a, short b, short c);
-ushort __ovld __cnfn select(ushort a, ushort b, short c);
-short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
-
-int __ovld __cnfn select(int a, int b, int c);
-uint __ovld __cnfn select(uint a, uint b, int c);
-int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
-float __ovld __cnfn select(float a, float b, int c);
-float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
-
-long __ovld __cnfn select(long a, long b, long c);
-ulong __ovld __cnfn select(ulong a, ulong b, long c);
-long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
-
-char __ovld __cnfn select(char a, char b, uchar c);
-uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
-
-short __ovld __cnfn select(short a, short b, ushort c);
-ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
-
-int __ovld __cnfn select(int a, int b, uint c);
-uint __ovld __cnfn select(uint a, uint b, uint c);
-int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
-float __ovld __cnfn select(float a, float b, uint c);
-float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
-
-long __ovld __cnfn select(long a, long b, ulong c);
-ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
-
-#ifdef cl_khr_fp64
-double __ovld __cnfn select(double a, double b, long c);
-double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
-double __ovld __cnfn select(double a, double b, ulong c);
-double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-half __ovld __cnfn select(half a, half b, short c);
-half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
-half __ovld __cnfn select(half a, half b, ushort c);
-half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
-#endif //cl_khr_fp16
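-
-// A minimal usage sketch, assuming a hypothetical kernel clamp_negative:
-// for vector operands, select() returns b[i] wherever the most significant
-// bit of c[i] is set, so a per-lane comparison result (-1 or 0) is a
-// natural selector.
-//
-// __kernel void clamp_negative(__global int4 *v) {
-//   size_t i = get_global_id(0);
-//   int4 x = v[i];
-//   v[i] = select(x, (int4)(0), x < (int4)(0)); // negative lanes become 0
-// }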
-
-// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
-// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type
-/**
- * Use generic type gentype to indicate the built-in data types
- * char, uchar, short, ushort, int, uint, long, ulong, float,
- * double or half.
- *
- * vloadn returns sizeof (gentypen) bytes of data read from address (p + (offset * n)).
- *
- * vstoren writes sizeof (gentypen) bytes given by data to address (p + (offset * n)).
- *
- * The address computed as (p + (offset * n)) must be
- * 8-bit aligned if gentype is char, uchar;
- * 16-bit aligned if gentype is short, ushort, half;
- * 32-bit aligned if gentype is int, uint, float;
- * 64-bit aligned if gentype is long, ulong, double.
- */
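-
-// A minimal usage sketch, assuming a hypothetical kernel scale_pairs:
-// vload2/vstore2 move one gentype2 per call, so element offset i addresses
-// bytes (p + i * 2 * sizeof(gentype)).
-//
-// __kernel void scale_pairs(__global float *buf, float s) {
-//   size_t i = get_global_id(0);
-//   float2 v = vload2(i, buf); // reads buf[2*i] and buf[2*i+1]
-//   vstore2(v * s, i, buf);    // stores the scaled pair back
-// }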
-
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
-#endif //cl_khr_fp16
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
-#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
-
-#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld vstore2(char2 data, size_t offset, char *p);
-void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
-void __ovld vstore2(short2 data, size_t offset, short *p);
-void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
-void __ovld vstore2(int2 data, size_t offset, int *p);
-void __ovld vstore2(uint2 data, size_t offset, uint *p);
-void __ovld vstore2(long2 data, size_t offset, long *p);
-void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
-void __ovld vstore2(float2 data, size_t offset, float *p);
-void __ovld vstore3(char3 data, size_t offset, char *p);
-void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
-void __ovld vstore3(short3 data, size_t offset, short *p);
-void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
-void __ovld vstore3(int3 data, size_t offset, int *p);
-void __ovld vstore3(uint3 data, size_t offset, uint *p);
-void __ovld vstore3(long3 data, size_t offset, long *p);
-void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
-void __ovld vstore3(float3 data, size_t offset, float *p);
-void __ovld vstore4(char4 data, size_t offset, char *p);
-void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
-void __ovld vstore4(short4 data, size_t offset, short *p);
-void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
-void __ovld vstore4(int4 data, size_t offset, int *p);
-void __ovld vstore4(uint4 data, size_t offset, uint *p);
-void __ovld vstore4(long4 data, size_t offset, long *p);
-void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
-void __ovld vstore4(float4 data, size_t offset, float *p);
-void __ovld vstore8(char8 data, size_t offset, char *p);
-void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
-void __ovld vstore8(short8 data, size_t offset, short *p);
-void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
-void __ovld vstore8(int8 data, size_t offset, int *p);
-void __ovld vstore8(uint8 data, size_t offset, uint *p);
-void __ovld vstore8(long8 data, size_t offset, long *p);
-void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
-void __ovld vstore8(float8 data, size_t offset, float *p);
-void __ovld vstore16(char16 data, size_t offset, char *p);
-void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
-void __ovld vstore16(short16 data, size_t offset, short *p);
-void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
-void __ovld vstore16(int16 data, size_t offset, int *p);
-void __ovld vstore16(uint16 data, size_t offset, uint *p);
-void __ovld vstore16(long16 data, size_t offset, long *p);
-void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
-void __ovld vstore16(float16 data, size_t offset, float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, double *p);
-void __ovld vstore3(double3 data, size_t offset, double *p);
-void __ovld vstore4(double4 data, size_t offset, double *p);
-void __ovld vstore8(double8 data, size_t offset, double *p);
-void __ovld vstore16(double16 data, size_t offset, double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, half *p);
-void __ovld vstore2(half2 data, size_t offset, half *p);
-void __ovld vstore3(half3 data, size_t offset, half *p);
-void __ovld vstore4(half4 data, size_t offset, half *p);
-void __ovld vstore8(half8 data, size_t offset, half *p);
-void __ovld vstore16(half16 data, size_t offset, half *p);
-#endif //cl_khr_fp16
-#else
-void __ovld vstore2(char2 data, size_t offset, __global char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __global short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __global int *p);
-void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
-void __ovld vstore2(long2 data, size_t offset, __global long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __global float *p);
-void __ovld vstore3(char3 data, size_t offset, __global char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __global short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __global int *p);
-void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
-void __ovld vstore3(long3 data, size_t offset, __global long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __global float *p);
-void __ovld vstore4(char4 data, size_t offset, __global char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __global short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __global int *p);
-void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
-void __ovld vstore4(long4 data, size_t offset, __global long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __global float *p);
-void __ovld vstore8(char8 data, size_t offset, __global char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __global short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __global int *p);
-void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
-void __ovld vstore8(long8 data, size_t offset, __global long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __global float *p);
-void __ovld vstore16(char16 data, size_t offset, __global char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __global short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __global int *p);
-void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
-void __ovld vstore16(long16 data, size_t offset, __global long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __global float *p);
-void __ovld vstore2(char2 data, size_t offset, __local char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __local short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __local int *p);
-void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
-void __ovld vstore2(long2 data, size_t offset, __local long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __local float *p);
-void __ovld vstore3(char3 data, size_t offset, __local char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __local short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __local int *p);
-void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
-void __ovld vstore3(long3 data, size_t offset, __local long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __local float *p);
-void __ovld vstore4(char4 data, size_t offset, __local char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __local short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __local int *p);
-void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
-void __ovld vstore4(long4 data, size_t offset, __local long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __local float *p);
-void __ovld vstore8(char8 data, size_t offset, __local char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __local short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __local int *p);
-void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
-void __ovld vstore8(long8 data, size_t offset, __local long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __local float *p);
-void __ovld vstore16(char16 data, size_t offset, __local char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __local short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __local int *p);
-void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
-void __ovld vstore16(long16 data, size_t offset, __local long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __local float *p);
-void __ovld vstore2(char2 data, size_t offset, __private char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __private short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __private int *p);
-void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
-void __ovld vstore2(long2 data, size_t offset, __private long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __private float *p);
-void __ovld vstore3(char3 data, size_t offset, __private char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __private short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __private int *p);
-void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
-void __ovld vstore3(long3 data, size_t offset, __private long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __private float *p);
-void __ovld vstore4(char4 data, size_t offset, __private char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __private short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __private int *p);
-void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
-void __ovld vstore4(long4 data, size_t offset, __private long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __private float *p);
-void __ovld vstore8(char8 data, size_t offset, __private char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __private short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __private int *p);
-void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
-void __ovld vstore8(long8 data, size_t offset, __private long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __private float *p);
-void __ovld vstore16(char16 data, size_t offset, __private char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __private short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __private int *p);
-void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
-void __ovld vstore16(long16 data, size_t offset, __private long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __private float *p);
-#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, __global double *p);
-void __ovld vstore3(double3 data, size_t offset, __global double *p);
-void __ovld vstore4(double4 data, size_t offset, __global double *p);
-void __ovld vstore8(double8 data, size_t offset, __global double *p);
-void __ovld vstore16(double16 data, size_t offset, __global double *p);
-void __ovld vstore2(double2 data, size_t offset, __local double *p);
-void __ovld vstore3(double3 data, size_t offset, __local double *p);
-void __ovld vstore4(double4 data, size_t offset, __local double *p);
-void __ovld vstore8(double8 data, size_t offset, __local double *p);
-void __ovld vstore16(double16 data, size_t offset, __local double *p);
-void __ovld vstore2(double2 data, size_t offset, __private double *p);
-void __ovld vstore3(double3 data, size_t offset, __private double *p);
-void __ovld vstore4(double4 data, size_t offset, __private double *p);
-void __ovld vstore8(double8 data, size_t offset, __private double *p);
-void __ovld vstore16(double16 data, size_t offset, __private double *p);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, __global half *p);
-void __ovld vstore2(half2 data, size_t offset, __global half *p);
-void __ovld vstore3(half3 data, size_t offset, __global half *p);
-void __ovld vstore4(half4 data, size_t offset, __global half *p);
-void __ovld vstore8(half8 data, size_t offset, __global half *p);
-void __ovld vstore16(half16 data, size_t offset, __global half *p);
-void __ovld vstore(half data, size_t offset, __local half *p);
-void __ovld vstore2(half2 data, size_t offset, __local half *p);
-void __ovld vstore3(half3 data, size_t offset, __local half *p);
-void __ovld vstore4(half4 data, size_t offset, __local half *p);
-void __ovld vstore8(half8 data, size_t offset, __local half *p);
-void __ovld vstore16(half16 data, size_t offset, __local half *p);
-void __ovld vstore(half data, size_t offset, __private half *p);
-void __ovld vstore2(half2 data, size_t offset, __private half *p);
-void __ovld vstore3(half3 data, size_t offset, __private half *p);
-void __ovld vstore4(half4 data, size_t offset, __private half *p);
-void __ovld vstore8(half8 data, size_t offset, __private half *p);
-void __ovld vstore16(half16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Read sizeof (half) bytes of data from address
- * (p + offset). The data read is interpreted as a
- * half value. The half value is converted to a
- * float value and the float value is returned.
- * The read address computed as (p + offset)
- * must be 16-bit aligned.
- */
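-
-// A minimal usage sketch, assuming a hypothetical kernel half_to_float:
-// each work-item widens one packed half to a float; half pointers are
-// usable here even without cl_khr_fp16.
-//
-// __kernel void half_to_float(__global const half *in, __global float *out) {
-//   size_t i = get_global_id(0);
-//   out[i] = vload_half(i, in); // reads the half at (in + i)
-// }
-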
-float __ovld vload_half(size_t offset, const __constant half *p);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Read sizeof (halfn) bytes of data from address
- * (p + (offset * n)). The data read is interpreted
- * as a halfn value. The halfn value read is
- * converted to a floatn value and the floatn
- * value is returned. The read address computed
- * as (p + (offset * n)) must be 16-bit aligned.
- */
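-
-// A minimal usage sketch, assuming a hypothetical kernel sum_half4:
-// vload_half4 reads four halfs starting at (p + offset * 4) and returns
-// the widened float4.
-//
-// __kernel void sum_half4(__global const half *in, __global float *out) {
-//   size_t i = get_global_id(0);
-//   float4 v = vload_half4(i, in);
-//   out[i] = v.x + v.y + v.z + v.w;
-// }
-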
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * The float value given by data is first
- * converted to a half value using the appropriate
- * rounding mode. The half value is then written
- * to the address computed as (p + offset). The
- * address computed as (p + offset) must be
- * 16-bit aligned.
- * vstore_half uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
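-
-// A minimal usage sketch, assuming a hypothetical kernel pack_half:
-// the suffixed variants pick the rounding mode explicitly; plain
-// vstore_half rounds to nearest even by default.
-//
-// __kernel void pack_half(__global const float *in, __global half *out) {
-//   size_t i = get_global_id(0);
-//   vstore_half_rtz(in[i], i, out); // truncate toward zero
-// }
-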
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld vstore_half(float data, size_t offset, half *p);
-void __ovld vstore_half_rte(float data, size_t offset, half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, half *p);
-void __ovld vstore_half_rte(double data, size_t offset, half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstore_half(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstore_half(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstore_half(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstore_half(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode. The halfn value is then written to the
- * address computed as (p + (offset * n)). The
- * address computed as (p + (offset * n)) must be
- * 16-bit aligned.
- * vstore_halfn uses the current rounding mode.
- * The default current rounding mode is round to
- * nearest even.
- */
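-
-// A minimal usage sketch, assuming a hypothetical kernel pack_half4:
-// a float4 is stored as four consecutive halfs at (p + offset * 4).
-//
-// __kernel void pack_half4(__global const float4 *in, __global half *out) {
-//   size_t i = get_global_id(0);
-//   vstore_half4(in[i], i, out);
-// }
-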
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld vstore_half2(float2 data, size_t offset, half *p);
-void __ovld vstore_half3(float3 data, size_t offset, half *p);
-void __ovld vstore_half4(float4 data, size_t offset, half *p);
-void __ovld vstore_half8(float8 data, size_t offset, half *p);
-void __ovld vstore_half16(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, half *p);
-void __ovld vstore_half3(double3 data, size_t offset, half *p);
-void __ovld vstore_half4(double4 data, size_t offset, half *p);
-void __ovld vstore_half8(double8 data, size_t offset, half *p);
-void __ovld vstore_half16(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-#else
-void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
-#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
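-
-/*
- * Example: an illustrative sketch (hypothetical kernel name) showing the
- * vstore_half* built-ins above, which let a kernel write half data even
- * without cl_khr_fp16. The _rtz suffix selects round-toward-zero.
- */
-__kernel void pack_rtz(__global const float4 *in, __global half *out) {
-    size_t i = get_global_id(0);
-    /* Writes a half4 to (out + i * 4) using round-toward-zero. */
-    vstore_half4_rtz(in[i], i, out);
-}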
-
-/**
- * For n = 1, 2, 4, 8 and 16, read sizeof (halfn)
- * bytes of data from address (p + (offset * n)).
- * The data read is interpreted as a halfn value.
- * The halfn value read is converted to a floatn
- * value and the floatn value is returned.
- * The address computed as (p + (offset * n))
- * must be aligned to sizeof (halfn) bytes.
- * For n = 3, vloada_half3 reads a half3 from
- * address (p + (offset * 4)) and returns a float3.
- * The address computed as (p + (offset * 4))
- * must be aligned to sizeof (half) * 4 bytes.
- */
-float __ovld vloada_half(size_t offset, const __constant half *p);
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float __ovld vloada_half(size_t offset, const half *p);
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float __ovld vloada_half(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float __ovld vloada_half(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float __ovld vloada_half(size_t offset, const __private half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
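-
-/*
- * Example: an illustrative sketch (hypothetical kernel name) that loads
- * half data with vloada_half4, computes in float, and stores back. For
- * n = 4 the address (p + offset * 4) must be aligned to sizeof (half4).
- */
-__kernel void scale_halves(__global const half *in, __global half *out,
-                           float factor) {
-    size_t i = get_global_id(0);
-    float4 v = vloada_half4(i, in);    /* reads a half4 at (in + i * 4) */
-    vstorea_half4(v * factor, i, out); /* default round-to-nearest-even */
-}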
-
-/**
- * The floatn value given by data is converted to
- * a halfn value using the appropriate rounding
- * mode.
- * For n = 1, 2, 4, 8 and 16, the halfn value is
- * written to the address computed as
- * (p + (offset * n)). The address computed as
- * (p + (offset * n)) must be aligned to
- * sizeof (halfn) bytes.
- * For n = 3, the half3 value is written to the
- * address computed as (p + (offset * 4)). The
- * address computed as (p + (offset * 4)) must be
- * aligned to sizeof (half) * 4 bytes.
- * vstorea_halfn uses the current rounding
- * mode. The default current rounding mode is
- * round to nearest even.
- */
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld vstorea_half(float data, size_t offset, half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half(double data, size_t offset, half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
-#endif //cl_khr_fp64
-
-#else
-void __ovld vstorea_half(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
-
-#ifdef cl_khr_fp64
-void __ovld vstorea_half(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p);
-#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
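-
-/*
- * Example: an illustrative sketch (hypothetical kernel name; assumes a
- * single-work-item enqueue) contrasting rounding-mode suffixes. The
- * unsuffixed vstorea_half uses the current rounding mode (round to
- * nearest even by default); _rtn and _rtp bound the stored value from
- * below and above respectively.
- */
-__kernel void bounds_to_half(float x, __global half *lo, __global half *hi) {
-    vstorea_half_rtn(x, 0, lo); /* round toward negative infinity */
-    vstorea_half_rtp(x, 0, hi); /* round toward positive infinity */
-}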
-
-// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
-
-/**
- * All work-items in a work-group executing the kernel
- * on a processor must execute this function before any
- * are allowed to continue execution beyond the barrier.
- * This function must be encountered by all work-items in
- * a work-group executing the kernel.
- * If barrier is inside a conditional statement, then all
- * work-items must enter the conditional if any work-item
- * enters the conditional statement and executes the
- * barrier.
- * If barrier is inside a loop, all work-items must execute
- * the barrier for each iteration of the loop before any are
- * allowed to continue execution beyond the barrier.
- * The barrier function also queues a memory fence
- * (reads and writes) to ensure correct ordering of
- * memory operations to local or global memory.
- * The flags argument specifies the memory address space
- * and can be set to a combination of the following literal
- * values.
- * CLK_LOCAL_MEM_FENCE - The barrier function
- * will either flush any variables stored in local memory
- * or queue a memory fence to ensure correct ordering of
- * memory operations to local memory.
- * CLK_GLOBAL_MEM_FENCE - The barrier function
- * will queue a memory fence to ensure correct ordering
- * of memory operations to global memory. This can be
- * useful when work-items, for example, write to buffer or
- * image objects and then want to read the updated data.
- */
-
-void __ovld __conv barrier(cl_mem_fence_flags flags);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
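-
-/*
- * Example: an illustrative work-group reduction (hypothetical kernel
- * name; assumes the local size is a power of two). Every work-item
- * executes each barrier, satisfying the conditions described above.
- */
-__kernel void block_sum(__global const float *in, __global float *out,
-                        __local float *tmp) {
-    size_t lid = get_local_id(0);
-    tmp[lid] = in[get_global_id(0)];
-    /* All of tmp[] is visible to the whole work-group past this point. */
-    barrier(CLK_LOCAL_MEM_FENCE);
-    for (size_t s = get_local_size(0) / 2; s > 0; s >>= 1) {
-        if (lid < s)
-            tmp[lid] += tmp[lid + s];
-        barrier(CLK_LOCAL_MEM_FENCE); /* executed on every iteration */
-    }
-    if (lid == 0)
-        out[get_group_id(0)] = tmp[0];
-}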
-
-// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
-
-/**
- * Orders loads and stores of a work-item
- * executing a kernel. This means that loads
- * and stores preceding the mem_fence will
- * be committed to memory before any loads
- * and stores following the mem_fence.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Read memory barrier that orders only
- * loads.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld read_mem_fence(cl_mem_fence_flags flags);
-
-/**
- * Write memory barrier that orders only
- * stores.
- * The flags argument specifies the memory
- * address space and can be set to a
- * combination of the following literal
- * values:
- * CLK_LOCAL_MEM_FENCE
- * CLK_GLOBAL_MEM_FENCE.
- */
-void __ovld write_mem_fence(cl_mem_fence_flags flags);
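-
-/*
- * Example: an illustrative sketch of mem_fence ordering a single
- * work-item's own accesses. The fence does not synchronize between
- * work-items; it only orders this work-item's loads and stores.
- */
-__kernel void fence_demo(__global int *buf) {
-    size_t i = get_global_id(0);
-    buf[i] = 1;
-    /* The store above commits before the load below, for this work-item. */
-    mem_fence(CLK_GLOBAL_MEM_FENCE);
-    int v = buf[i];
-    buf[i] = v + 1;
-}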
-
-// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-cl_mem_fence_flags __ovld get_fence(const void *ptr);
-cl_mem_fence_flags __ovld get_fence(void *ptr);
-
-/**
- * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions
- * and checked in Sema since they should be declared as
- *   addr gentype* to_addr (gentype*);
- * where gentype is a builtin type or a user-defined type.
- */
-
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
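-
-/*
- * Example: an illustrative sketch (requires the OpenCL C 2.0 generic
- * address space; hypothetical function name). to_global returns a
- * __global pointer when its argument points into global memory, and
- * NULL otherwise.
- */
-void zero_if_global(int *p) { /* p is a generic pointer */
-    __global int *g = to_global(p);
-    if (g != NULL)
-        *g = 0;
-}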
-
-// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
-
-/**
- * event_t async_work_group_copy (
- * __global gentype *dst,
- * const __local gentype *src,
- * size_t num_elements,
- * event_t event)
- * Perform an async copy of num_elements
- * gentype elements from src to dst. The async
- * copy is performed by all work-items in a work-group
- * and this built-in function must therefore
- * be encountered by all work-items in a work-group
- * executing the kernel with the same
- * argument values; otherwise the results are
- * undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the async_work_group_copy with
- * a previous async copy, allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in the event argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
-event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
-#endif //cl_khr_fp16
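-
-/*
- * Example: an illustrative tile-processing sketch (hypothetical kernel
- * name). Every work-item in the group issues the same copy with
- * identical arguments; the explicit barrier synchronizes the source
- * data before the write-back copy, since the copy itself does not.
- */
-__kernel void double_tile(__global const float *in, __global float *out,
-                          __local float *tile) {
-    size_t n = get_local_size(0);
-    size_t base = get_group_id(0) * n;
-    event_t e = async_work_group_copy(tile, in + base, n, 0);
-    wait_group_events(1, &e);
-    tile[get_local_id(0)] *= 2.0f;
-    barrier(CLK_LOCAL_MEM_FENCE); /* make all writes to tile visible */
-    e = async_work_group_copy(out + base, tile, n, 0);
-    wait_group_events(1, &e);
-}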
-
-/**
- * Perform an async gather of num_elements
- * gentype elements from src to dst. The
- * src_stride is the stride in elements for each
- * gentype element read from src. The dst_stride
- * is the stride in elements for each gentype
- * element written to dst. The async gather is
- * performed by all work-items in a work-group.
- * This built-in function must therefore be
- * encountered by all work-items in a work-group
- * executing the kernel with the same argument
- * values; otherwise the results are undefined.
- * Returns an event object that can be used by
- * wait_group_events to wait for the async copy
- * to finish. The event argument can also be used
- * to associate the
- * async_work_group_strided_copy with a
- * previous async copy, allowing an event to be
- * shared by multiple async copies; otherwise event
- * should be zero.
- * If the event argument is non-zero, the event object
- * supplied in the event argument will be returned.
- * This function does not perform any implicit
- * synchronization of source data such as using a
- * barrier before performing the copy.
- */
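-/*
- * Example (illustrative sketch): gather every 4th element of a strided
- * global array into densely packed local memory, assuming hypothetical
- * pointers `tile` (__local float *) and `src` (const __global float *):
- *
- *   event_t e = async_work_group_strided_copy(tile, src, n, 4, 0);
- *   wait_group_events(1, &e);
- *
- * Here 4 is src_stride: elements src[0], src[4], src[8], ... are read.
- */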
-event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#ifdef cl_khr_fp64
-event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
-#endif //cl_khr_fp16
-
-/**
- * Wait for events that identify the
- * async_work_group_copy operations to
- * complete. The event objects specified in
- * event_list will be released after the wait is
- * performed.
- * This function must be encountered by all work-items
- * in a work-group executing the kernel with
- * the same num_events and event objects specified
- * in event_list; otherwise the results are undefined.
- */
-void __ovld wait_group_events(int num_events, event_t *event_list);
-
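
A minimal usage sketch (kernel name, buffers, and the final doubling step are hypothetical) pairing an async strided copy with wait_group_events: every work-item in the group encounters both calls with identical arguments, and the local tile is read only after the wait returns.

    __kernel void gather_strided(__global const float *src,
                                 __global float *dst,
                                 __local float *tile,
                                 uint stride) {
      // Gather get_local_size(0) floats, reading every stride-th element
      // of src; the copy proceeds asynchronously and returns an event.
      event_t e = async_work_group_strided_copy(tile, src,
                                                get_local_size(0),
                                                (size_t)stride, 0);
      wait_group_events(1, &e);
      // tile[i] now holds src[i * stride].
      dst[get_global_id(0)] = tile[get_local_id(0)] * 2.0f;
    }
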
-/**
- * Prefetch num_elements * sizeof(gentype)
- * bytes into the global cache. The prefetch
- * instruction is applied to a work-item in a work-group
- * and does not affect the functional
- * behavior of the kernel.
- */
-void __ovld prefetch(const __global char *p, size_t num_elements);
-void __ovld prefetch(const __global uchar *p, size_t num_elements);
-void __ovld prefetch(const __global short *p, size_t num_elements);
-void __ovld prefetch(const __global ushort *p, size_t num_elements);
-void __ovld prefetch(const __global int *p, size_t num_elements);
-void __ovld prefetch(const __global uint *p, size_t num_elements);
-void __ovld prefetch(const __global long *p, size_t num_elements);
-void __ovld prefetch(const __global ulong *p, size_t num_elements);
-void __ovld prefetch(const __global float *p, size_t num_elements);
-void __ovld prefetch(const __global char2 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
-void __ovld prefetch(const __global short2 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
-void __ovld prefetch(const __global int2 *p, size_t num_elements);
-void __ovld prefetch(const __global uint2 *p, size_t num_elements);
-void __ovld prefetch(const __global long2 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
-void __ovld prefetch(const __global float2 *p, size_t num_elements);
-void __ovld prefetch(const __global char3 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
-void __ovld prefetch(const __global short3 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
-void __ovld prefetch(const __global int3 *p, size_t num_elements);
-void __ovld prefetch(const __global uint3 *p, size_t num_elements);
-void __ovld prefetch(const __global long3 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
-void __ovld prefetch(const __global float3 *p, size_t num_elements);
-void __ovld prefetch(const __global char4 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
-void __ovld prefetch(const __global short4 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
-void __ovld prefetch(const __global int4 *p, size_t num_elements);
-void __ovld prefetch(const __global uint4 *p, size_t num_elements);
-void __ovld prefetch(const __global long4 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
-void __ovld prefetch(const __global float4 *p, size_t num_elements);
-void __ovld prefetch(const __global char8 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
-void __ovld prefetch(const __global short8 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
-void __ovld prefetch(const __global int8 *p, size_t num_elements);
-void __ovld prefetch(const __global uint8 *p, size_t num_elements);
-void __ovld prefetch(const __global long8 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
-void __ovld prefetch(const __global float8 *p, size_t num_elements);
-void __ovld prefetch(const __global char16 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
-void __ovld prefetch(const __global short16 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
-void __ovld prefetch(const __global int16 *p, size_t num_elements);
-void __ovld prefetch(const __global uint16 *p, size_t num_elements);
-void __ovld prefetch(const __global long16 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
-void __ovld prefetch(const __global float16 *p, size_t num_elements);
-#ifdef cl_khr_fp64
-void __ovld prefetch(const __global double *p, size_t num_elements);
-void __ovld prefetch(const __global double2 *p, size_t num_elements);
-void __ovld prefetch(const __global double3 *p, size_t num_elements);
-void __ovld prefetch(const __global double4 *p, size_t num_elements);
-void __ovld prefetch(const __global double8 *p, size_t num_elements);
-void __ovld prefetch(const __global double16 *p, size_t num_elements);
-#endif //cl_khr_fp64
-#ifdef cl_khr_fp16
-void __ovld prefetch(const __global half *p, size_t num_elements);
-void __ovld prefetch(const __global half2 *p, size_t num_elements);
-void __ovld prefetch(const __global half3 *p, size_t num_elements);
-void __ovld prefetch(const __global half4 *p, size_t num_elements);
-void __ovld prefetch(const __global half8 *p, size_t num_elements);
-void __ovld prefetch(const __global half16 *p, size_t num_elements);
-#endif // cl_khr_fp16
-
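
A sketch of typical prefetch use (hypothetical kernel): hint an upcoming global read into the cache; per the definition above, the hint may change performance but never results.

    __kernel void scale(__global const float4 *in,
                        __global float4 *out, uint n) {
      size_t gid = get_global_id(0);
      // Hint one upcoming element into the global cache; the call
      // never affects the kernel's output.
      if (gid + 1 < n)
        prefetch(&in[gid + 1], 1);
      out[gid] = in[gid] * 0.5f;
    }
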
-// OpenCL v1.1 s6.11.11, v1.2 s6.12.11 - Atomic Functions
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old + val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_add(volatile __global int *p, int val);
-unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_add(volatile __local int *p, int val);
-unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_add(volatile int *p, int val);
-unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_add(volatile __global int *p, int val);
-unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_add(volatile __local int *p, int val);
-unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_add(volatile __global long *p, long val);
-unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_add(volatile __local long *p, long val);
-unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
-#endif
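
As a concrete (hypothetical) example, atomic_add is the standard way to accumulate into shared bins without a data race; the returned old value is simply discarded here.

    __kernel void histogram16(__global const uchar *pixels,
                              volatile __global uint *bins) {
      uchar v = pixels[get_global_id(0)];
      // Racing work-items all land their increments; only the side
      // effect matters, so the returned old value is ignored.
      atomic_add(&bins[v >> 4], 1u);
    }
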
-
-/**
- * Read the 32-bit value (referred to as old) stored at the location pointed to
- * by p. Compute (old - val) and store the result at the location pointed to by
- * p. The function returns old.
- */
-int __ovld atomic_sub(volatile __global int *p, int val);
-unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_sub(volatile __local int *p, int val);
-unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_sub(volatile int *p, int val);
-unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_sub(volatile __global int *p, int val);
-unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_sub(volatile __local int *p, int val);
-unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_sub(volatile __global long *p, long val);
-unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_sub(volatile __local long *p, long val);
-unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Swaps the old value stored at the location
- * pointed to by p with the new value given by val.
- * Returns the old value.
- */
-int __ovld atomic_xchg(volatile __global int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xchg(volatile __local int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile __global float *p, float val);
-float __ovld atomic_xchg(volatile __local float *p, float val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xchg(volatile int *p, int val);
-unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile float *p, float val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_xchg(volatile __global int *p, int val);
-unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_xchg(volatile __local int *p, int val);
-unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_xchg(volatile __global long *p, long val);
-long __ovld atom_xchg(volatile __local long *p, long val);
-unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
-unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old + 1) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_inc(volatile __global int *p);
-unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
-int __ovld atomic_inc(volatile __local int *p);
-unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_inc(volatile int *p);
-unsigned int __ovld atomic_inc(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_inc(volatile __global int *p);
-unsigned int __ovld atom_inc(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_inc(volatile __local int *p);
-unsigned int __ovld atom_inc(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_inc(volatile __global long *p);
-unsigned long __ovld atom_inc(volatile __global unsigned long *p);
-long __ovld atom_inc(volatile __local long *p);
-unsigned long __ovld atom_inc(volatile __local unsigned long *p);
-#endif
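
A common pattern built on the returned old value (sketch, hypothetical names) is stream compaction: atomic_inc hands each producing work-item a unique output slot.

    __kernel void compact_positive(__global const int *in,
                                   __global int *out,
                                   volatile __global uint *count) {
      int v = in[get_global_id(0)];
      if (v > 0) {
        uint slot = atomic_inc(count); // old value == our unique index
        out[slot] = v;
      }
    }
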
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old - 1) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_dec(volatile __global int *p);
-unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
-int __ovld atomic_dec(volatile __local int *p);
-unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_dec(volatile int *p);
-unsigned int __ovld atomic_dec(volatile unsigned int *p);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_dec(volatile __global int *p);
-unsigned int __ovld atom_dec(volatile __global unsigned int *p);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_dec(volatile __local int *p);
-unsigned int __ovld atom_dec(volatile __local unsigned int *p);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_dec(volatile __global long *p);
-unsigned long __ovld atom_dec(volatile __global unsigned long *p);
-long __ovld atom_dec(volatile __local long *p);
-unsigned long __ovld atom_dec(volatile __local unsigned long *p);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old == cmp) ? val : old and store the result at
- * the location pointed to by p. The function
- * returns old.
- */
-int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
-long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
-#endif
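
Because the old value is returned, atomic_cmpxchg supports the classic compare-and-swap retry loop for read-modify-write operations the built-ins do not cover. A sketch (hypothetical helper; it assumes non-negative floats, whose IEEE-754 bit patterns order the same way as the values):

    void atomic_fmax_pos(volatile __global int *p, float val) {
      int old_bits = *p;
      while (as_float(old_bits) < val) {
        int seen = atomic_cmpxchg(p, old_bits, as_int(val));
        if (seen == old_bits)
          break;         // our swap won
        old_bits = seen; // lost the race; retry against the newer value
      }
    }
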
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * min(old, val) and store the minimum value at
- * the location pointed to by p. The function
- * returns old.
- */
-int __ovld atomic_min(volatile __global int *p, int val);
-unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_min(volatile __local int *p, int val);
-unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_min(volatile int *p, int val);
-unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_min(volatile __global int *p, int val);
-unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_min(volatile __local int *p, int val);
-unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_min(volatile __global long *p, long val);
-unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_min(volatile __local long *p, long val);
-unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * max(old, val) and store the maximum value at
- * the location pointed to by p. The function
- * returns old.
- */
-int __ovld atomic_max(volatile __global int *p, int val);
-unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_max(volatile __local int *p, int val);
-unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_max(volatile int *p, int val);
-unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_max(volatile __global int *p, int val);
-unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_max(volatile __local int *p, int val);
-unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_max(volatile __global long *p, long val);
-unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_max(volatile __local long *p, long val);
-unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old & val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_and(volatile __global int *p, int val);
-unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_and(volatile __local int *p, int val);
-unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_and(volatile int *p, int val);
-unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_and(volatile __global int *p, int val);
-unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_and(volatile __local int *p, int val);
-unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_and(volatile __global long *p, long val);
-unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_and(volatile __local long *p, long val);
-unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old | val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_or(volatile __global int *p, int val);
-unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_or(volatile __local int *p, int val);
-unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_or(volatile int *p, int val);
-unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_or(volatile __global int *p, int val);
-unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_or(volatile __local int *p, int val);
-unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_or(volatile __global long *p, long val);
-unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_or(volatile __local long *p, long val);
-unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-/**
- * Read the 32-bit value (referred to as old)
- * stored at the location pointed to by p. Compute
- * (old ^ val) and store the result at the location
- * pointed to by p. The function returns old.
- */
-int __ovld atomic_xor(volatile __global int *p, int val);
-unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xor(volatile __local int *p, int val);
-unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
-#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xor(volatile int *p, int val);
-unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_xor(volatile __global int *p, int val);
-unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
-#endif
-#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_xor(volatile __local int *p, int val);
-unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
-#endif
-
-#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_xor(volatile __global long *p, long val);
-unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_xor(volatile __local long *p, long val);
-unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
-#endif
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
-#endif
-
-// OpenCL v2.0 s6.13.11 - Atomic Functions
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Support for double atomics requires the cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics extensions.
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-#endif
-
-// atomic_init()
-void __ovld atomic_init(volatile atomic_int *object, int value);
-void __ovld atomic_init(volatile atomic_uint *object, uint value);
-void __ovld atomic_init(volatile atomic_float *object, float value);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile atomic_long *object, long value);
-void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
-#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile atomic_double *object, double value);
-#endif //cl_khr_fp64
-#endif
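
atomic_init is a non-atomic store, so it must not race with other accesses; a typical (hypothetical) pattern is to have a single work-item seed a counter before any racing work begins.

    __kernel void reset_counter(__global atomic_uint *counter) {
      // Non-atomic initialization: exactly one work-item performs it,
      // and the host orders this kernel before any users of counter.
      if (get_global_id(0) == 0)
        atomic_init(counter, 0u);
    }
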
-
-// atomic_work_item_fence()
-void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
-
-// atomic_fetch()
-
-int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
-#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
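
The _explicit overloads expose a C11-style memory order plus an OpenCL memory scope. A relaxed, device-scope counter (hypothetical kernel) is the common case when only atomicity, not ordering, is required:

    __kernel void count_hits(__global const float *x,
                             __global atomic_uint *hits) {
      if (x[get_global_id(0)] > 0.0f)
        // Relaxed: the increment must be atomic, but it does not
        // order any other memory operations.
        atomic_fetch_add_explicit(hits, 1u, memory_order_relaxed,
                                  memory_scope_device);
    }
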
-
-// OpenCL v2.0 s6.13.11.7.5:
-// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
-// or/xor/and/min/max: atomic type argument can be intptr_t/uintptr_t, value type argument can be intptr_t/uintptr_t.
-
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-
-uintptr_t __ovld atomic_fetch_or(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_max(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-
-intptr_t __ovld atomic_fetch_or(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_max(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-#endif
-
-// atomic_store()
-
-void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store(volatile atomic_float *object, float desired);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile atomic_double *object, double desired);
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif
-
-// atomic_load()
-
-int __ovld atomic_load(volatile atomic_int *object);
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load(volatile atomic_uint *object);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load(volatile atomic_float *object);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile atomic_double *object);
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_load(volatile atomic_long *object);
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load(volatile atomic_ulong *object);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
-#endif
-
-// atomic_exchange()
-
-int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange(volatile atomic_float *object, float desired);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile atomic_double *object, double desired);
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
-#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-#endif
-
-// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
-
-bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
-                                                                                 int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
-                                                                                 uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
-                                                                                 float desired, memory_order success, memory_order failure, memory_scope scope);
-#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
-                                                                                 double desired, memory_order success, memory_order failure, memory_scope scope);
-#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
-                                                                                 long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
-                                                                                 ulong desired, memory_order success, memory_order failure, memory_scope scope);
-#endif
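-/* Illustrative sketch (not part of the original header): the usual CAS retry
- * loop built on atomic_compare_exchange_strong. On failure the built-in
- * stores the value it actually observed into *expected, so each iteration
- * re-tests against fresh data. Shown here as a global integer maximum.
- *
- *   __kernel void atomic_max_example(volatile __global atomic_int *gmax,
- *                                    __global const int *data) {
- *     int v = data[get_global_id(0)];
- *     int expected = atomic_load(gmax);
- *     while (expected < v &&
- *            !atomic_compare_exchange_strong(gmax, &expected, v)) {
- *       // expected now holds the current value of *gmax; loop re-tests it
- *     }
- *   }
- */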
-
-// atomic_flag_test_and_set() and atomic_flag_clear()
-
-bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear(volatile atomic_flag *object);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
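-/* Illustrative sketch (not part of the original header): a minimal work-group
- * spinlock over atomic_flag. Naive spinlocks can livelock on GPUs when
- * work-items in the same sub-group diverge, so this shows the calling
- * convention rather than a recommended synchronization primitive.
- *
- *   void lock(volatile __local atomic_flag *f) {
- *     while (atomic_flag_test_and_set_explicit(f, memory_order_acquire,
- *                                              memory_scope_work_group))
- *       ;
- *   }
- *   void unlock(volatile __local atomic_flag *f) {
- *     atomic_flag_clear_explicit(f, memory_order_release,
- *                                memory_scope_work_group);
- *   }
- */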
-
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
-
-/**
- * The shuffle and shuffle2 built-in functions construct
- * a permutation of elements from one or two input
- * vectors respectively that are of the same type,
- * returning a vector with the same element type as the
- * input and with the same length as the shuffle mask.
- * The size of each element in the mask must match the
- * size of each element in the result. For shuffle, only
- * the ilogb(2m-1) least significant bits of each mask
- * element are considered. For shuffle2, only the
- * ilogb(2m-1)+1 least significant bits of each mask
- * element are considered. Other bits in the mask shall
- * be ignored.
- * The elements of the input vectors are numbered from
- * left to right across one or both of the vectors. For this
- * purpose, the number of elements in a vector is given
- * by vec_step(gentypem). The shuffle mask operand
- * specifies, for each element of the result vector, which
- * element of the one or two input vectors the result
- * element gets.
- * Examples:
- *   uint4 mask = (uint4)(3, 2, 1, 0);
- *   float4 a;
- *   float4 r = shuffle(a, mask);
- *   // r.s0123 = a.wzyx
- *
- *   uint8 mask = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
- *   float4 a, b;
- *   float8 r = shuffle2(a, b, mask);
- *   // r.s0123 = a.xyzw
- *   // r.s4567 = b.xyzw
- *
- *   uint4 mask;
- *   float8 a;
- *   float4 b;
- *   b = shuffle(a, mask);   // valid: 4-byte mask elements match the 4-byte
- *                           // result elements, result length matches mask
- *
- * Examples that are not valid are:
- *   uint8 mask;
- *   short16 a;
- *   short8 b;
- *   b = shuffle(a, mask);   // not valid: 4-byte mask elements do not match
- *                           // the 2-byte result elements
- */
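-/* Illustrative sketch (not part of the original header): a runnable kernel
- * using the declarations below. shuffle() reverses each float4; shuffle2()
- * interleaves two vectors, with mask indices 0..3 selecting from the first
- * operand and 4..7 from the second.
- *
- *   __kernel void shuffle_example(__global float4 *out,
- *                                 __global const float4 *x,
- *                                 __global const float4 *y) {
- *     size_t i = get_global_id(0);
- *     float4 rev = shuffle(x[i], (uint4)(3, 2, 1, 0));        // x[i].wzyx
- *     float4 mix = shuffle2(x[i], y[i], (uint4)(0, 4, 1, 5)); // (x.x, y.x, x.y, y.y)
- *     out[i] = rev + mix;
- *   }
- */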
-char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
-
-short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
-
-int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
-
-uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
-
-long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
-
-float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
-
-char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
-
-short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
-
-int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
-
-uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
-
-long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
-
-float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
-
-char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
-
-short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
-
-int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
-
-uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
-
-long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
-
-float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
-
-char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
-
-short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
-
-int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
-
-uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
-
-long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
-
-float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
-
-double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
-
-double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
-
-double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
-
-half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
-
-half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
-
-half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
-#endif //cl_khr_fp16
-
-char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
-
-short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
-
-int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
-
-uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
-
-long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
-
-float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
-
-char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
-
-short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
-
-int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
-
-uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
-
-long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
-
-float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
-
-char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
-
-short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
-
-int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
-
-uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
-
-long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
-
-float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
-
-char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
-
-short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
-
-int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
-
-uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
-
-long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
-
-float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
-
-#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
-
-double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
-
-double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
-
-double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
-
-half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
-
-half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
-
-half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
-#endif //cl_khr_fp16
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-
-int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
-#endif
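-/* Illustrative sketch (not part of the original header): device-side printf.
- * A string literal already resides in the constant address space, so it
- * satisfies the __constant format parameter; output ordering across
- * work-items is implementation-defined.
- *
- *   __kernel void printf_example(__global const int *data) {
- *     size_t i = get_global_id(0);
- *     printf("item %u = %d\n", (uint)i, data[i]);
- *   }
- */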
-
-// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
-
-#ifdef cl_khr_gl_msaa_sharing
-#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
-#endif //cl_khr_gl_msaa_sharing
-
-/**
- * Use the coordinate (coord.xy) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (coord.x, coord.y, coord.z) to do
- * an element lookup in the 3D image object specified
- * by image. coord.w is ignored.
- *
- * Use the coordinate (coord.z) to index into the
- * 2D image array object specified by image_array
- * and (coord.x, coord.y) to do an element lookup in
- * the 2D image object specified by image.
- *
- * Use the coordinate (coord) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.y) to index into the
- * 1D image array object specified by image_array
- * and (coord.x) to do an element lookup in
- * the 1D image object specified by image.
- *
- * Use the coordinate (coord.xy) and sample to do an
- * element lookup in the 2D multi-sample image specified
- * by image.
- *
- * Use coord.xy and sample to do an element
- * lookup in the 2D multi-sample image layer
- * identified by index coord.z in the 2D multi-sample
- * image array specified by image.
- *
- * For mipmap images, use the mip-level specified by
- * the Level-of-Detail (lod) or use gradients for LOD
- * computation.
- *
- * read_imagef returns floating-point values in the
- * range [0.0 ... 1.0] for image objects created with
- * image_channel_data_type set to one of the predefined
- * packed formats, CL_UNORM_INT8, or CL_UNORM_INT16.
- *
- * read_imagef returns floating-point values in the
- * range [-1.0 ... 1.0] for image objects created with
- * image_channel_data_type set to CL_SNORM_INT8
- * or CL_SNORM_INT16.
- *
- * read_imagef returns floating-point values for image
- * objects created with image_channel_data_type set to
- * CL_HALF_FLOAT or CL_FLOAT.
- *
- * read_imagei and read_imageui return
- * unnormalized signed integer and unsigned integer
- * values respectively. Each channel will be stored in a
- * 32-bit integer.
- *
- * read_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imagei
- * are undefined.
- *
- * read_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- * If the image_channel_data_type is not one of the
- * above values, the values returned by read_imageui
- * are undefined.
- *
- * The read_image{i|ui} calls support a nearest filter
- * only. The filter_mode specified in sampler
- * must be set to CLK_FILTER_NEAREST; otherwise
- * the values returned are undefined.
- *
- * The read_image{f|i|ui} calls that take
- * integer coordinates must use a sampler with
- * normalized coordinates set to
- * CLK_NORMALIZED_COORDS_FALSE and
- * addressing mode set to
- * CLK_ADDRESS_CLAMP_TO_EDGE,
- * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE;
- * otherwise the values returned are undefined.
- *
- * Values returned by read_imagef for image objects
- * with image_channel_data_type values not specified
- * in the description above are undefined.
- */
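-/* Illustrative sketch (not part of the original header): sampling a 2D image
- * with normalized coordinates and linear filtering, using the declarations
- * below. The +0.5f centers each sample on its texel.
- *
- *   __constant sampler_t smp = CLK_NORMALIZED_COORDS_TRUE |
- *                              CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
- *
- *   __kernel void sample_example(read_only image2d_t img,
- *                                __global float4 *out) {
- *     int2 p = (int2)(get_global_id(0), get_global_id(1));
- *     float2 uv = (convert_float2(p) + 0.5f) /
- *                 convert_float2(get_image_dim(img));
- *     out[p.y * get_image_width(img) + p.x] = read_imagef(img, smp, uv);
- *   }
- */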
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
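-/* Illustrative sketch (not part of the original header): explicit
- * level-of-detail sampling under cl_khr_mipmap_image. The lod argument
- * selects the mip level directly instead of leaving the choice to the
- * implementation.
- *
- *   __kernel void mip_example(read_only image2d_t img, sampler_t smp,
- *                             __global float4 *out, float lod) {
- *     int2 p = (int2)(get_global_id(0), get_global_id(1));
- *     float2 uv = (convert_float2(p) + 0.5f) /
- *                 convert_float2(get_image_dim(img));
- *     out[p.y * get_image_width(img) + p.x] = read_imagef(img, smp, uv, lod);
- *   }
- */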
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-
-/**
- * Sampler-less Image Access
- */
-
-float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
-
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
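-/* Illustrative sketch (not part of the original header): sampler-less reads
- * take integer coordinates and behave as if the sampler were
- * CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST.
- *
- *   __kernel void copy_example(read_only image2d_t src,
- *                              __global float4 *dst) {
- *     int2 p = (int2)(get_global_id(0), get_global_id(1));
- *     dst[p.y * get_image_width(src) + p.x] = read_imagef(src, p);
- *   }
- */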
-
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
-/**
- * Sampler-less Image Access
- */
-half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-#endif //cl_khr_fp16
-
-// Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
-
-#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
-#endif //cl_khr_depth_images
-
-#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
-
-float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
-float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image read functions returning half4 type
-#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by image.
- * (coord.x, coord.y) are considered to be unnormalized coordinates
- * and must be in the range 0 ... image width - 1, and 0
- * ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y) in the 2D image object specified by index
- * (coord.z) of the 2D image array object image_array.
- * (coord.x, coord.y) are considered to be unnormalized
- * coordinates and must be in the range 0 ... image width - 1
- * and 0 ... image height - 1.
- *
- * Write color value to location specified by coordinate
- * (coord) in the 1D image (buffer) object specified by image.
- * coord is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x) in the 1D image object specified by index
- * (coord.y) of the 1D image array object image_array.
- * coord.x is considered to be an unnormalized coordinate
- * and must be in the range 0 ... image width - 1.
- *
- * Write color value to location specified by coordinate
- * (coord.x, coord.y, coord.z) in the 3D image object specified by image.
- * coord.x, coord.y and coord.z are considered to be unnormalized
- * coordinates and must be in the range 0 ... image width - 1,
- * 0 ... image height - 1 and 0 ... image depth - 1.
- *
- * For mipmap images, use mip-level specified by lod.
- *
- * Appropriate data format conversion to the specified
- * image format is done before writing the color value.
- *
- * write_imagef can only be used with image objects
- * created with image_channel_data_type set to one of
- * the pre-defined packed formats or set to
- * CL_SNORM_INT8, CL_UNORM_INT8,
- * CL_SNORM_INT16, CL_UNORM_INT16,
- * CL_HALF_FLOAT or CL_FLOAT. Appropriate data
- * format conversion will be done to convert channel
- * data from a floating-point value to the actual data format
- * in which the channels are stored.
- *
- * write_imagei can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_SIGNED_INT8,
- * CL_SIGNED_INT16 and
- * CL_SIGNED_INT32.
- *
- * write_imageui can only be used with image objects
- * created with image_channel_data_type set to one of
- * the following values:
- * CL_UNSIGNED_INT8,
- * CL_UNSIGNED_INT16 and
- * CL_UNSIGNED_INT32.
- *
- * The behavior of write_imagef, write_imagei and
- * write_imageui for image objects created with
- * image_channel_data_type values not specified in
- * the description above or with (x, y) coordinate
- * values that are not in the range (0 ... image width - 1,
- * 0 ... image height - 1), respectively, is undefined.
- */
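-/* Illustrative sketch (not part of the original header): a read-only source
- * and write-only destination, inverting every channel. Coordinates for both
- * calls are unnormalized integers.
- *
- *   __kernel void invert_example(read_only image2d_t src,
- *                                write_only image2d_t dst) {
- *     int2 p = (int2)(get_global_id(0), get_global_id(1));
- *     write_imagef(dst, p, (float4)(1.0f) - read_imagef(src, p));
- *   }
- */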
-void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //defined(cl_khr_mipmap_image_writes)
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
-void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-
-// Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
-#endif
-
-#ifdef cl_khr_depth_images
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
-#endif //cl_khr_depth_images
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
-
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
-
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
-#endif //cl_khr_3d_image_writes
-
-#endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Image write functions for half4 type
-#ifdef cl_khr_fp16
-void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
-void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
-#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
-#endif
-void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
-#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// Note: In OpenCL v1.0/1.1/1.2, the image argument of the image query builtin
-// functions has no access qualifier and therefore defaults to read_only. Image
-// query builtin functions taking a write_only image argument must therefore be
-// declared separately, as below.
-
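-// Example (illustrative only, not part of the original header; kernel and
-// parameter names are hypothetical): because the query builtins are also
-// declared for write_only images, a kernel can bounds-check against the
-// destination image it is writing to:
-//
-//   __kernel void fill_red(write_only image2d_t dst) {
-//     int2 p = (int2)(get_global_id(0), get_global_id(1));
-//     if (p.x < get_image_width(dst) && p.y < get_image_height(dst))
-//       write_imagef(dst, p, (float4)(1.0f, 0.0f, 0.0f, 1.0f));
-//   }
-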
-/**
- * Return the image width in pixels.
- */
-int __ovld __cnfn get_image_width(read_only image1d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_only image2d_t image);
-int __ovld __cnfn get_image_width(read_only image3d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_array_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_width(write_only image1d_t image);
-int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(write_only image1d_array_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_width(read_write image1d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_write image2d_t image);
-int __ovld __cnfn get_image_width(read_write image3d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_array_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image height in pixels.
- */
-int __ovld __cnfn get_image_height(read_only image2d_t image);
-int __ovld __cnfn get_image_height(read_only image3d_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_height(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_height(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_height(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_height(read_write image2d_t image);
-int __ovld __cnfn get_image_height(read_write image3d_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-/**
- * Return the image depth in pixels.
- */
-int __ovld __cnfn get_image_depth(read_only image3d_t image);
-
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_depth(write_only image3d_t image);
-#endif
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#ifdef cl_khr_mipmap_image
-/**
- * Return the number of mip levels of the image.
- */
-
-int __ovld get_image_num_mip_levels(read_only image1d_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_t image);
-int __ovld get_image_num_mip_levels(read_only image3d_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld get_image_num_mip_levels(write_only image3d_t image);
-#endif
-
-int __ovld get_image_num_mip_levels(read_write image1d_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_t image);
-int __ovld get_image_num_mip_levels(read_write image3d_t image);
-
-int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
-
-int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
-
-#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
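-
-// Illustrative sketch (not part of the original header; names are
-// hypothetical): with cl_khr_mipmap_image enabled, the mip-level count can
-// clamp a caller-supplied LOD before a mipmapped read:
-//
-//   float4 sample_lod(read_only image2d_t img, sampler_t s, float2 uv, int lod) {
-//     int max_lod = get_image_num_mip_levels(img) - 1;
-//     return read_imagef(img, s, uv, (float)min(lod, max_lod));
-//   }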
-
-/**
- * Return the channel data type. Valid values are:
- * CLK_SNORM_INT8
- * CLK_SNORM_INT16
- * CLK_UNORM_INT8
- * CLK_UNORM_INT16
- * CLK_UNORM_SHORT_565
- * CLK_UNORM_SHORT_555
- * CLK_UNORM_SHORT_101010
- * CLK_SIGNED_INT8
- * CLK_SIGNED_INT16
- * CLK_SIGNED_INT32
- * CLK_UNSIGNED_INT8
- * CLK_UNSIGNED_INT16
- * CLK_UNSIGNED_INT32
- * CLK_HALF_FLOAT
- * CLK_FLOAT
- */
-
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
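-
-// Illustrative sketch (not part of the original header; the helper name is
-// hypothetical): the queried channel data type can select the matching read
-// function. Signed-integer images (read_imagei) are omitted for brevity:
-//
-//   float4 load_as_float(read_only image2d_t src, int2 p) {
-//     int dt = get_image_channel_data_type(src);
-//     if (dt == CLK_UNSIGNED_INT8 || dt == CLK_UNSIGNED_INT16 ||
-//         dt == CLK_UNSIGNED_INT32)
-//       return convert_float4(read_imageui(src, p));
-//     return read_imagef(src, p);  // float and normalized formats
-//   }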
-
-/**
- * Return the image channel order. Valid values are:
- * CLK_A
- * CLK_R
- * CLK_Rx
- * CLK_RG
- * CLK_RGx
- * CLK_RA
- * CLK_RGB
- * CLK_RGBx
- * CLK_RGBA
- * CLK_ARGB
- * CLK_BGRA
- * CLK_INTENSITY
- * CLK_LUMINANCE
- */
-
-int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
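-
-// Illustrative sketch (not part of the original header; the helper name is
-// hypothetical): the channel order tells a kernel whether an alpha channel is
-// present at all:
-//
-//   bool has_alpha(read_only image2d_t img) {
-//     int order = get_image_channel_order(img);
-//     return order == CLK_A || order == CLK_RA || order == CLK_RGBA ||
-//            order == CLK_ARGB || order == CLK_BGRA;
-//   }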
-
-/**
- * Return the 2D image width and height as an int2
- * type. The width is returned in the x component, and
- * the height in the y component.
- */
-int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
-#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
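-
-// Illustrative sketch (not part of the original header; kernel names are
-// hypothetical): get_image_dim packs width/height into one int2, which makes
-// the usual bounds check a single comparison pair:
-//
-//   __kernel void invert(read_only image2d_t src, write_only image2d_t dst) {
-//     int2 p = (int2)(get_global_id(0), get_global_id(1));
-//     int2 dim = get_image_dim(src);          // dim.x = width, dim.y = height
-//     if (p.x >= dim.x || p.y >= dim.y) return;
-//     float4 v = read_imagef(src, p);
-//     write_imagef(dst, p, (float4)(1.0f - v.xyz, v.w));
-//   }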
-
-/**
- * Return the 3D image width, height, and depth as an
- * int4 type. The width is returned in the x
- * component, height in the y component, depth in the z
- * component and the w component is 0.
- */
-int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
-#ifdef cl_khr_3d_image_writes
-int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
-#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
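-
-// Illustrative sketch (not part of the original header; the helper name is
-// hypothetical): iterating the z extent from the packed int4 (w is always 0):
-//
-//   float4 integrate_z(read_only image3d_t vol, int2 xy) {
-//     int4 dim = get_image_dim(vol);          // (width, height, depth, 0)
-//     float4 acc = (float4)(0.0f);
-//     for (int z = 0; z < dim.z; ++z)
-//       acc += read_imagef(vol, (int4)(xy, z, 0));
-//     return acc;
-//   }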
-
-/**
- * Return the image array size.
- */
-
-size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
-#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
-#endif //cl_khr_depth_images
-#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
-#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
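-
-// Illustrative sketch (not part of the original header; the helper name is
-// hypothetical): averaging a pixel across all layers of a 2D image array.
-// The array index travels in the z component of the int4 coordinate:
-//
-//   float4 layer_avg(read_only image2d_array_t arr, int2 xy) {
-//     size_t n = get_image_array_size(arr);
-//     float4 acc = (float4)(0.0f);
-//     for (size_t i = 0; i < n; ++i)
-//       acc += read_imagef(arr, (int4)(xy, (int)i, 0));
-//     return acc / (float)n;
-//   }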
-
-/**
- * Return the number of samples associated with the image.
- */
-#if defined(cl_khr_gl_msaa_sharing)
-int __ovld get_image_num_samples(read_only image2d_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
-
-int __ovld get_image_num_samples(write_only image2d_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld get_image_num_samples(read_write image2d_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#endif //cl_khr_gl_msaa_sharing
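-
-// Illustrative sketch (not part of the original header; requires
-// cl_khr_gl_msaa_sharing; the helper name is hypothetical): a plain resolve
-// that averages every sample of an MSAA texel:
-//
-//   float4 resolve(read_only image2d_msaa_t img, int2 p) {
-//     int n = get_image_num_samples(img);
-//     float4 acc = (float4)(0.0f);
-//     for (int s = 0; s < n; ++s)
-//       acc += read_imagef(img, p, s);
-//     return acc / (float)n;
-//   }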
-
-// OpenCL v2.0 s6.13.15 - Work-group Functions
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __conv work_group_all(int predicate);
-int __ovld __conv work_group_any(int predicate);
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_broadcast(half a, size_t local_id);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
-#endif
-int __ovld __conv work_group_broadcast(int a, size_t local_id);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld __conv work_group_broadcast(long a, size_t local_id);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld __conv work_group_broadcast(float a, size_t local_id);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_broadcast(double a, size_t local_id);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-half __ovld __conv work_group_reduce_add(half x);
-half __ovld __conv work_group_reduce_min(half x);
-half __ovld __conv work_group_reduce_max(half x);
-half __ovld __conv work_group_scan_exclusive_add(half x);
-half __ovld __conv work_group_scan_exclusive_min(half x);
-half __ovld __conv work_group_scan_exclusive_max(half x);
-half __ovld __conv work_group_scan_inclusive_add(half x);
-half __ovld __conv work_group_scan_inclusive_min(half x);
-half __ovld __conv work_group_scan_inclusive_max(half x);
-#endif
-int __ovld __conv work_group_reduce_add(int x);
-int __ovld __conv work_group_reduce_min(int x);
-int __ovld __conv work_group_reduce_max(int x);
-int __ovld __conv work_group_scan_exclusive_add(int x);
-int __ovld __conv work_group_scan_exclusive_min(int x);
-int __ovld __conv work_group_scan_exclusive_max(int x);
-int __ovld __conv work_group_scan_inclusive_add(int x);
-int __ovld __conv work_group_scan_inclusive_min(int x);
-int __ovld __conv work_group_scan_inclusive_max(int x);
-uint __ovld __conv work_group_reduce_add(uint x);
-uint __ovld __conv work_group_reduce_min(uint x);
-uint __ovld __conv work_group_reduce_max(uint x);
-uint __ovld __conv work_group_scan_exclusive_add(uint x);
-uint __ovld __conv work_group_scan_exclusive_min(uint x);
-uint __ovld __conv work_group_scan_exclusive_max(uint x);
-uint __ovld __conv work_group_scan_inclusive_add(uint x);
-uint __ovld __conv work_group_scan_inclusive_min(uint x);
-uint __ovld __conv work_group_scan_inclusive_max(uint x);
-long __ovld __conv work_group_reduce_add(long x);
-long __ovld __conv work_group_reduce_min(long x);
-long __ovld __conv work_group_reduce_max(long x);
-long __ovld __conv work_group_scan_exclusive_add(long x);
-long __ovld __conv work_group_scan_exclusive_min(long x);
-long __ovld __conv work_group_scan_exclusive_max(long x);
-long __ovld __conv work_group_scan_inclusive_add(long x);
-long __ovld __conv work_group_scan_inclusive_min(long x);
-long __ovld __conv work_group_scan_inclusive_max(long x);
-ulong __ovld __conv work_group_reduce_add(ulong x);
-ulong __ovld __conv work_group_reduce_min(ulong x);
-ulong __ovld __conv work_group_reduce_max(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
-float __ovld __conv work_group_reduce_add(float x);
-float __ovld __conv work_group_reduce_min(float x);
-float __ovld __conv work_group_reduce_max(float x);
-float __ovld __conv work_group_scan_exclusive_add(float x);
-float __ovld __conv work_group_scan_exclusive_min(float x);
-float __ovld __conv work_group_scan_exclusive_max(float x);
-float __ovld __conv work_group_scan_inclusive_add(float x);
-float __ovld __conv work_group_scan_inclusive_min(float x);
-float __ovld __conv work_group_scan_inclusive_max(float x);
-#ifdef cl_khr_fp64
-double __ovld __conv work_group_reduce_add(double x);
-double __ovld __conv work_group_reduce_min(double x);
-double __ovld __conv work_group_reduce_max(double x);
-double __ovld __conv work_group_scan_exclusive_add(double x);
-double __ovld __conv work_group_scan_exclusive_min(double x);
-double __ovld __conv work_group_scan_exclusive_max(double x);
-double __ovld __conv work_group_scan_inclusive_add(double x);
-double __ovld __conv work_group_scan_inclusive_min(double x);
-double __ovld __conv work_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
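-
-// Illustrative sketch (not part of the original header; kernel and buffer
-// names are hypothetical): work_group_reduce_add returns the work-group-wide
-// sum to every work-item, so one item per group can publish it:
-//
-//   __kernel void block_sums(__global const float *in, __global float *out) {
-//     float total = work_group_reduce_add(in[get_global_id(0)]);
-//     if (get_local_id(0) == 0)
-//       out[get_group_id(0)] = total;
-//   }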
-
-// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
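-
-// Illustrative sketch (not part of the original header; kernel and parameter
-// names are hypothetical): is_valid_reserve_id guards the reserve/commit
-// pattern used with the OpenCL 2.0 pipe builtins:
-//
-//   __kernel void produce(__write_only pipe int out, __global const int *src) {
-//     reserve_id_t r = reserve_write_pipe(out, 1);
-//     if (is_valid_reserve_id(r)) {
-//       write_pipe(out, r, 0, &src[get_global_id(0)]);
-//       commit_write_pipe(out, r);
-//     }
-//   }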
-
-
-// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ndrange_t __ovld ndrange_1D(size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t);
-ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
-
-ndrange_t __ovld ndrange_2D(const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]);
-ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]);
-
-ndrange_t __ovld ndrange_3D(const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]);
-ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]);
-
-int __ovld enqueue_marker(queue_t, uint, const clk_event_t*, clk_event_t*);
-
-void __ovld retain_event(clk_event_t);
-
-void __ovld release_event(clk_event_t);
-
-clk_event_t __ovld create_user_event(void);
-
-void __ovld set_user_event_status(clk_event_t e, int state);
-
-bool __ovld is_valid_event(clk_event_t event);
-
-void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
-
-queue_t __ovld get_default_queue(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
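-
-// Illustrative sketch (not part of the original header; kernel and buffer
-// names are hypothetical): a parent kernel enqueueing a child block on the
-// default device queue:
-//
-//   __kernel void parent(__global int *buf, uint n) {
-//     queue_t q = get_default_queue();
-//     ndrange_t r = ndrange_1D(n);
-//     enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, r,
-//                    ^{ buf[get_global_id(0)] += 1; });
-//   }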
-
-// OpenCL Extension v2.0 s9.17 - Sub-groups
-
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
-// Shared Sub Group Functions
-uint    __ovld get_sub_group_size(void);
-uint    __ovld get_max_sub_group_size(void);
-uint    __ovld get_num_sub_groups(void);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_enqueued_num_sub_groups(void);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld get_sub_group_id(void);
-uint    __ovld get_sub_group_local_id(void);
-
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-int     __ovld __conv sub_group_all(int predicate);
-int     __ovld __conv sub_group_any(int predicate);
-
-int     __ovld __conv sub_group_broadcast(int   x, uint sub_group_local_id);
-uint    __ovld __conv sub_group_broadcast(uint  x, uint sub_group_local_id);
-long    __ovld __conv sub_group_broadcast(long  x, uint sub_group_local_id);
-ulong   __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
-float   __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
-
-int     __ovld __conv sub_group_reduce_add(int   x);
-uint    __ovld __conv sub_group_reduce_add(uint  x);
-long    __ovld __conv sub_group_reduce_add(long  x);
-ulong   __ovld __conv sub_group_reduce_add(ulong x);
-float   __ovld __conv sub_group_reduce_add(float x);
-int     __ovld __conv sub_group_reduce_min(int   x);
-uint    __ovld __conv sub_group_reduce_min(uint  x);
-long    __ovld __conv sub_group_reduce_min(long  x);
-ulong   __ovld __conv sub_group_reduce_min(ulong x);
-float   __ovld __conv sub_group_reduce_min(float x);
-int     __ovld __conv sub_group_reduce_max(int   x);
-uint    __ovld __conv sub_group_reduce_max(uint  x);
-long    __ovld __conv sub_group_reduce_max(long  x);
-ulong   __ovld __conv sub_group_reduce_max(ulong x);
-float   __ovld __conv sub_group_reduce_max(float x);
-
-int     __ovld __conv sub_group_scan_exclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_add(float x);
-int     __ovld __conv sub_group_scan_exclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_min(float x);
-int     __ovld __conv sub_group_scan_exclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_exclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_exclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_exclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_exclusive_max(float x);
-
-int     __ovld __conv sub_group_scan_inclusive_add(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_add(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_add(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_add(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_add(float x);
-int     __ovld __conv sub_group_scan_inclusive_min(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_min(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_min(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_min(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_min(float x);
-int     __ovld __conv sub_group_scan_inclusive_max(int   x);
-uint    __ovld __conv sub_group_scan_inclusive_max(uint  x);
-long    __ovld __conv sub_group_scan_inclusive_max(long  x);
-ulong   __ovld __conv sub_group_scan_inclusive_max(ulong x);
-float   __ovld __conv sub_group_scan_inclusive_max(float x);
-
-#ifdef cl_khr_fp16
-half    __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
-half    __ovld __conv sub_group_reduce_add(half x);
-half    __ovld __conv sub_group_reduce_min(half x);
-half    __ovld __conv sub_group_reduce_max(half x);
-half    __ovld __conv sub_group_scan_exclusive_add(half x);
-half    __ovld __conv sub_group_scan_exclusive_min(half x);
-half    __ovld __conv sub_group_scan_exclusive_max(half x);
-half    __ovld __conv sub_group_scan_inclusive_add(half x);
-half    __ovld __conv sub_group_scan_inclusive_min(half x);
-half    __ovld __conv sub_group_scan_inclusive_max(half x);
-#endif //cl_khr_fp16
-
-#ifdef cl_khr_fp64
-double  __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
-double  __ovld __conv sub_group_reduce_add(double x);
-double  __ovld __conv sub_group_reduce_min(double x);
-double  __ovld __conv sub_group_reduce_max(double x);
-double  __ovld __conv sub_group_scan_exclusive_add(double x);
-double  __ovld __conv sub_group_scan_exclusive_min(double x);
-double  __ovld __conv sub_group_scan_exclusive_max(double x);
-double  __ovld __conv sub_group_scan_inclusive_add(double x);
-double  __ovld __conv sub_group_scan_inclusive_min(double x);
-double  __ovld __conv sub_group_scan_inclusive_max(double x);
-#endif //cl_khr_fp64
-
-#endif //defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
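-
-// Illustrative sketch (not part of the original header; kernel and buffer
-// names are hypothetical): an inclusive prefix sum computed independently
-// inside each sub-group:
-//
-//   __kernel void sg_prefix(__global const int *in, __global int *out) {
-//     out[get_global_id(0)] = sub_group_scan_inclusive_add(in[get_global_id(0)]);
-//   }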
-
-#if defined(cl_intel_subgroups)
-// Intel-Specific Sub Group Functions
-float   __ovld __conv intel_sub_group_shuffle( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_down( float  cur, float  next, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_down( int  cur, int  next, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_down( uint  cur, uint  next, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_down( long cur, long next, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_down( ulong cur, ulong next, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_up( float  prev, float  cur, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_up( int  prev, int  cur, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_up( uint  prev, uint  cur, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
-
-float   __ovld __conv intel_sub_group_shuffle_xor( float  x, uint c );
-float2  __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
-float3  __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
-float4  __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
-float8  __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
-
-int     __ovld __conv intel_sub_group_shuffle_xor( int  x, uint c );
-int2    __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
-int3    __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
-int4    __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
-int8    __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
-int16   __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
-
-uint    __ovld __conv intel_sub_group_shuffle_xor( uint  x, uint c );
-uint2   __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
-uint3   __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
-uint4   __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
-uint8   __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
-uint16  __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
-
-long    __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
-ulong   __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
-
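-// Illustrative sketch (not part of the original header; the helper name is
-// hypothetical): a butterfly reduction over a full, power-of-two-sized
-// sub-group using shuffle_xor; afterwards every lane holds the sum:
-//
-//   float sg_sum(float v) {
-//     for (uint mask = get_max_sub_group_size() / 2; mask > 0; mask /= 2)
-//       v += intel_sub_group_shuffle_xor(v, mask);
-//     return v;
-//   }
-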
-uint    __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
-uint2   __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
-uint4   __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
-uint8   __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint    __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
-uint2   __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
-uint4   __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
-uint8   __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint    __ovld __conv intel_sub_group_block_read( const __global uint* p );
-uint2   __ovld __conv intel_sub_group_block_read2( const __global uint* p );
-uint4   __ovld __conv intel_sub_group_block_read4( const __global uint* p );
-uint8   __ovld __conv intel_sub_group_block_read8( const __global uint* p );
-
-void    __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void    __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
-void    __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
-void    __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
-void    __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void    __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
-void    __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
-void    __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
-void    __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
-
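-// Illustrative sketch (not part of the original header; kernel and buffer
-// names are hypothetical): block reads/writes take one sub-group-uniform base
-// pointer and move sub_group_size consecutive elements, one per work-item.
-// Subtracting the lane id recovers that base under a linear 1D mapping:
-//
-//   __kernel void sg_copy(__global const uint *src, __global uint *dst) {
-//     size_t base = get_global_id(0) - get_sub_group_local_id();
-//     uint v = intel_sub_group_block_read(src + base);
-//     intel_sub_group_block_write(dst + base, v);
-//   }
-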
-#ifdef cl_khr_fp16
-half    __ovld __conv intel_sub_group_shuffle( half x, uint c );
-half    __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
-half    __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
-#endif
-
-#if defined(cl_khr_fp64)
-double  __ovld __conv intel_sub_group_shuffle( double x, uint c );
-double  __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
-double  __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
-#endif
-
-#endif //cl_intel_subgroups
-
-#if defined(cl_intel_subgroups_short)
-short       __ovld __conv intel_sub_group_broadcast( short  x, uint sub_group_local_id );
-short2      __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
-short3      __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
-short4      __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
-short8      __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
-
-ushort      __ovld __conv intel_sub_group_broadcast( ushort  x, uint sub_group_local_id );
-ushort2     __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
-ushort3     __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
-ushort4     __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
-ushort8     __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
-
-short       __ovld __conv intel_sub_group_shuffle( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle( short16 x, uint c);
-
-ushort      __ovld __conv intel_sub_group_shuffle( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_down( short   cur, short   next, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_down( short2  cur, short2  next, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_down( short3  cur, short3  next, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_down( short4  cur, short4  next, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_down( short8  cur, short8  next, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_down( ushort   cur, ushort   next, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_down( ushort2  cur, ushort2  next, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_down( ushort3  cur, ushort3  next, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_down( ushort4  cur, ushort4  next, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_down( ushort8  cur, ushort8  next, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_up( short   prev, short   cur, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_up( short2  prev, short2  cur, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_up( short3  prev, short3  cur, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_up( short4  prev, short4  cur, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_up( short8  prev, short8  cur, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_up( short16 prev, short16 cur, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_up( ushort   prev, ushort   cur, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_up( ushort2  prev, ushort2  cur, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_up( ushort3  prev, ushort3  cur, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_up( ushort4  prev, ushort4  cur, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_up( ushort8  prev, ushort8  cur, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_up( ushort16 prev, ushort16 cur, uint c );
-
-short       __ovld __conv intel_sub_group_shuffle_xor( short   x, uint c );
-short2      __ovld __conv intel_sub_group_shuffle_xor( short2  x, uint c );
-short3      __ovld __conv intel_sub_group_shuffle_xor( short3  x, uint c );
-short4      __ovld __conv intel_sub_group_shuffle_xor( short4  x, uint c );
-short8      __ovld __conv intel_sub_group_shuffle_xor( short8  x, uint c );
-short16     __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
-
-ushort      __ovld __conv intel_sub_group_shuffle_xor( ushort   x, uint c );
-ushort2     __ovld __conv intel_sub_group_shuffle_xor( ushort2  x, uint c );
-ushort3     __ovld __conv intel_sub_group_shuffle_xor( ushort3  x, uint c );
-ushort4     __ovld __conv intel_sub_group_shuffle_xor( ushort4  x, uint c );
-ushort8     __ovld __conv intel_sub_group_shuffle_xor( ushort8  x, uint c );
-ushort16    __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
-
-short       __ovld __conv intel_sub_group_reduce_add( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_add( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_min( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_min( ushort  x );
-short       __ovld __conv intel_sub_group_reduce_max( short   x );
-ushort      __ovld __conv intel_sub_group_reduce_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_exclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_exclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_exclusive_max( ushort  x );
-
-short       __ovld __conv intel_sub_group_scan_inclusive_add( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_add( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_min( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_min( ushort  x );
-short       __ovld __conv intel_sub_group_scan_inclusive_max( short   x );
-ushort      __ovld __conv intel_sub_group_scan_inclusive_max( ushort  x );
-
-uint       __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint       __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-uint       __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
-uint2      __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
-uint4      __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
-uint8      __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
-
-void       __ovld __conv intel_sub_group_block_write_ui( write_only image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( write_only image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( write_only image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( write_only image2d_t image, int2 byte_coord, uint8 data );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void       __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void       __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
-void       __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
-void       __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
-void       __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
-
-ushort      __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-ushort      __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
-ushort2     __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
-ushort4     __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
-ushort8     __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-ushort      __ovld __conv intel_sub_group_block_read_us(  const __global ushort* p );
-ushort2     __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
-ushort4     __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
-ushort8     __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
-
-void        __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void        __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort  data);
-void        __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
-void        __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
-void        __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-
-void        __ovld __conv intel_sub_group_block_write_us(  __global ushort* p, ushort  data );
-void        __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
-void        __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
-void        __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
-#endif // cl_intel_subgroups_short
-
-#ifdef cl_intel_device_side_avc_motion_estimation
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
-
-// MCE built-in functions
-uchar __ovld
-intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
-    uchar slice_type, uchar qp);
-ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
-    uchar slice_type, uchar qp);
-uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
-    uchar slice_type, uchar qp);
-uint2 __ovld
-intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
-    uchar slice_type, uchar qp);
-uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
-    uchar slice_type, uchar qp);
-
-uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
-uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
-uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
-uchar __ovld
-intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
-
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_shape_penalty(
-    ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_ac_only_haar(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_mce_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
-    intel_sub_group_avc_mce_result_t result);
-ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
-    intel_sub_group_avc_mce_result_t result);
-uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
-    intel_sub_group_avc_mce_result_t result);
-uchar __ovld
-intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_mce_result_t result);
-
-// IME built-in functions
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_initialize(
-    ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference(
-    short2 ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference(
-    short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_max_motion_vector_count(
-    uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_unidirectional_mix_disable(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_early_search_termination_threshold(
-    uchar threshold, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_weighted_sad(
-    uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
-
-__attribute__((deprecated("If you use the latest Intel driver, please use "
-                          "intel_sub_group_avc_ime_ref_window_size instead",
-                          "intel_sub_group_avc_ime_ref_window_size")))
-ushort2 __ovld
-intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
-ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
-    uchar search_window_config, char dual_ref);
-short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
-    short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
-    ushort2 image_size);
-
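-/* Editorial usage sketch (hypothetical values `search_cfg`, `pred_offset`,
- * and `frame_size`; only the two helpers declared above are assumed): clamp
- * a predicted reference offset to the search window and frame bounds before
- * running IME.
- *
- *   ushort2 win = intel_sub_group_avc_ime_ref_window_size(search_cfg, 0);
- *   short2 adj  = intel_sub_group_avc_ime_adjust_ref_offset(
- *       pred_offset, src_coord, win, frame_size);
- */
-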
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
-intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
-intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ime_payload_t payload,
-    intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
-
-intel_sub_group_avc_ime_single_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_single_reference_streamin(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
-intel_sub_group_avc_ime_get_dual_reference_streamin(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_single_reference_streamout(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_ime_strip_dual_reference_streamout(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_single_reference_streamout_t result,
-    uchar major_shape);
-uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
-    intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
-    uchar major_shape, uchar direction);
-
-uchar __ovld intel_sub_group_avc_ime_get_border_reached(
-    uchar image_select, intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
-    intel_sub_group_avc_ime_result_t result);
-
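-/* Editorial end-to-end sketch (hypothetical kernel arguments `src`, `ref`,
- * and `vme_sampler`; flag values illustrative only) of a single-reference
- * IME evaluation using the declarations above:
- *
- *   intel_sub_group_avc_ime_payload_t p =
- *       intel_sub_group_avc_ime_initialize(src_coord, 0x0, 0x0);
- *   p = intel_sub_group_avc_ime_set_single_reference(ref_offset, 0x0, p);
- *   intel_sub_group_avc_ime_result_t r =
- *       intel_sub_group_avc_ime_evaluate_with_single_reference(
- *           src, ref, vme_sampler, p);
- *   ulong  mvs = intel_sub_group_avc_ime_get_motion_vectors(r);
- *   ushort sad = intel_sub_group_avc_ime_get_best_inter_distortion(r);
- */
-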
-// REF built-in functions
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_fme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar sad_adjustment);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_bme_initialize(
-    ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
-    uchar minor_shapes, uchar directions, uchar pixel_resolution,
-    uchar bidirectional_weight, uchar sad_adjustment);
-
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bidirectional_mix_disable(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_bilinear_filter_enable(
-    intel_sub_group_avc_ref_payload_t payload);
-
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_ref_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_ref_payload_t payload);
-
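-/* Editorial sketch (hypothetical inputs; the shape/direction values would
- * come from a prior IME result, see the getters above): refine motion
- * vectors to fractional-pel precision with the REF functions declared above.
- *
- *   intel_sub_group_avc_ref_payload_t rp = intel_sub_group_avc_fme_initialize(
- *       src_coord, mvs, major_shape, minor_shapes, directions,
- *       pixel_resolution, sad_adjustment);
- *   intel_sub_group_avc_ref_result_t rr =
- *       intel_sub_group_avc_ref_evaluate_with_single_reference(
- *           src, ref, vme_sampler, rp);
- */
-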
-// SIC built-in functions
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_initialize(
-    ushort2 src_coord);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_skc(
-    uint skip_block_partition_type, uint skip_motion_vector_mask,
-    ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
-    uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
-    uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
-    uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
-    ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
-    ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
-    intel_sub_group_avc_sic_payload_t payload);
-uint __ovld
-intel_sub_group_avc_sic_get_motion_vector_mask(
-    uint skip_block_partition_type, uchar direction);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
-    uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
-    uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
-    uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
-    uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_skc_forward_transform_enable(
-    ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
-    uchar block_based_skip_type,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_ipe(
-    read_only image2d_t src_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_single_reference(
-    read_only image2d_t src_image, read_only image2d_t ref_image,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_dual_reference(
-    read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
-    read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_sic_evaluate_with_multi_reference(
-    read_only image2d_t src_image, uint packed_reference_ids,
-    uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
-    intel_sub_group_avc_sic_payload_t payload);
-
-uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
-    intel_sub_group_avc_sic_result_t result);
-uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
-    intel_sub_group_avc_sic_result_t result);
-uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
-    intel_sub_group_avc_sic_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
-    intel_sub_group_avc_sic_result_t result);
-
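-/* Editorial sketch (hypothetical neighbour-pixel inputs) of an intra
- * prediction estimation pass with the SIC functions above:
- *
- *   intel_sub_group_avc_sic_payload_t sp =
- *       intel_sub_group_avc_sic_initialize(src_coord);
- *   sp = intel_sub_group_avc_sic_configure_ipe(
- *       luma_mask, neighbour_avail, left_px, corner_px, upper_px,
- *       upper_right_px, sad_adjustment, sp);
- *   intel_sub_group_avc_sic_result_t sr =
- *       intel_sub_group_avc_sic_evaluate_ipe(src, vme_sampler, sp);
- *   ulong modes = intel_sub_group_avc_sic_get_packed_ipe_luma_modes(sr);
- */
-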
-// Wrappers
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
-    uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_shape_penalty(
-    ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_inter_direction_penalty(
-    uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_motion_vector_cost_function(
-    ulong packed_cost_center_delta, uint2 packed_cost_table,
-    uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
-    uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
-    uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
-    uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
-    intel_sub_group_avc_sic_payload_t payload);
-
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_ime_set_ac_only_haar(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_ref_set_ac_only_haar(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_set_ac_only_haar(
-    intel_sub_group_avc_sic_payload_t payload);
-
-ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
-    intel_sub_group_avc_ime_result_t result);
-ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
-    intel_sub_group_avc_ref_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
-    intel_sub_group_avc_ref_result_t result);
-ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
-    intel_sub_group_avc_sic_result_t result);
-
-ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
-    intel_sub_group_avc_ime_result_t result);
-ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
-    intel_sub_group_avc_ref_result_t result);
-uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
-    intel_sub_group_avc_ref_result_t result);
-
-uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
-    intel_sub_group_avc_ime_result_t result);
-uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
-    intel_sub_group_avc_ref_result_t result);
-
-uchar __ovld
-intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ime_result_t result);
-uchar __ovld
-intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
-    uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
-    intel_sub_group_avc_ref_result_t result);
-
-// Type conversion functions
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_payload(
-    intel_sub_group_avc_ime_payload_t payload);
-intel_sub_group_avc_ime_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_payload(
-    intel_sub_group_avc_ref_payload_t payload);
-intel_sub_group_avc_ref_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-intel_sub_group_avc_mce_payload_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_payload(
-    intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_payload(
-    intel_sub_group_avc_mce_payload_t payload);
-
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ime_convert_to_mce_result(
-    intel_sub_group_avc_ime_result_t result);
-intel_sub_group_avc_ime_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ime_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_ref_convert_to_mce_result(
-    intel_sub_group_avc_ref_result_t result);
-intel_sub_group_avc_ref_result_t __ovld
-intel_sub_group_avc_mce_convert_to_ref_result(
-    intel_sub_group_avc_mce_result_t result);
-intel_sub_group_avc_mce_result_t __ovld
-intel_sub_group_avc_sic_convert_to_mce_result(
-    intel_sub_group_avc_sic_result_t result);
-intel_sub_group_avc_sic_result_t __ovld
-intel_sub_group_avc_mce_convert_to_sic_result(
-    intel_sub_group_avc_mce_result_t result);
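-
-/* Editorial sketch: the conversions above let the shared MCE setters act on
- * an IME (or REF/SIC) payload. Hypothetical flow:
- *
- *   intel_sub_group_avc_mce_payload_t m =
- *       intel_sub_group_avc_ime_convert_to_mce_payload(ime_payload);
- *   m = intel_sub_group_avc_mce_set_ac_only_haar(m);
- *   ime_payload = intel_sub_group_avc_mce_convert_to_ime_payload(m);
- */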
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
-#endif // cl_intel_device_side_avc_motion_estimation
-
-#ifdef cl_amd_media_ops
-uint __ovld amd_bitalign(uint a, uint b, uint c);
-uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
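-
-/* Editorial note (semantics per AMD's published cl_amd_media_ops extension,
- * stated here as an assumption rather than taken from this header):
- * component-wise, amd_bitalign(a, b, c) is equivalent to
- *   (uint)((((ulong)a << 32) | (ulong)b) >> (c & 31)). */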
-
-uint __ovld amd_bytealign(uint a, uint b, uint c);
-uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_lerp(uint a, uint b, uint c);
-uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_pack(float4 v);
-
-uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
-
-uint __ovld amd_sadhi(uint a, uint b, uint c);
-uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_sad(uint a, uint b, uint c);
-uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
-
-float __ovld amd_unpack0(uint a);
-float2 __ovld amd_unpack0(uint2 a);
-float3 __ovld amd_unpack0(uint3 a);
-float4 __ovld amd_unpack0(uint4 a);
-float8 __ovld amd_unpack0(uint8 a);
-float16 __ovld amd_unpack0(uint16 a);
-
-float __ovld amd_unpack1(uint a);
-float2 __ovld amd_unpack1(uint2 a);
-float3 __ovld amd_unpack1(uint3 a);
-float4 __ovld amd_unpack1(uint4 a);
-float8 __ovld amd_unpack1(uint8 a);
-float16 __ovld amd_unpack1(uint16 a);
-
-float __ovld amd_unpack2(uint a);
-float2 __ovld amd_unpack2(uint2 a);
-float3 __ovld amd_unpack2(uint3 a);
-float4 __ovld amd_unpack2(uint4 a);
-float8 __ovld amd_unpack2(uint8 a);
-float16 __ovld amd_unpack2(uint16 a);
-
-float __ovld amd_unpack3(uint a);
-float2 __ovld amd_unpack3(uint2 a);
-float3 __ovld amd_unpack3(uint3 a);
-float4 __ovld amd_unpack3(uint4 a);
-float8 __ovld amd_unpack3(uint8 a);
-float16 __ovld amd_unpack3(uint16 a);
-#endif // cl_amd_media_ops
-
-#ifdef cl_amd_media_ops2
-int __ovld amd_bfe(int src0, uint src1, uint src2);
-int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
-int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
-int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
-int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
-int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfe(uint src0, uint src1, uint src2);
-uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_bfm(uint src0, uint src1);
-uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
-uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
-uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
-uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
-uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
-
-float __ovld amd_max3(float src0, float src1, float src2);
-float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_max3(int src0, int src1, int src2);
-int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_max3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_median3(float src0, float src1, float src2);
-float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_median3(int src0, int src1, int src2);
-int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_median3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
-
-float __ovld amd_min3(float src0, float src1, float src2);
-float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
-float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
-float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
-float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
-float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
-
-int __ovld amd_min3(int src0, int src1, int src2);
-int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
-int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
-int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
-int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
-int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
-
-uint __ovld amd_min3(uint src0, uint src1, uint src2);
-uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
-
-ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
-ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
-ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
-ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
-ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
-ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
-
-uint __ovld amd_msad(uint src0, uint src1, uint src2);
-uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadd(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
-
-uint __ovld amd_sadw(uint src0, uint src1, uint src2);
-uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
-uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
-uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
-uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
-uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
-#endif // cl_amd_media_ops2
-
-#if defined(cl_arm_integer_dot_product_int8)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : begin
-uint __ovld arm_dot(uchar4 a, uchar4 b);
-int __ovld arm_dot(char4 a, char4 b);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : end
-#endif // defined(cl_arm_integer_dot_product_int8)
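-
-/* Editorial note (per Arm's cl_arm_integer_dot_product_int8 specification,
- * stated as an assumption): arm_dot(a, b) computes
- *   a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w
- * with the multiplies and accumulation performed at 32-bit precision; the
- * arm_dot_acc variants below additionally add the scalar accumulator c. */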
-
-#if defined(cl_arm_integer_dot_product_accumulate_int8)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : begin
-uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc(char4 a, char4 b, int c);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : end
-#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
-
-#if defined(cl_arm_integer_dot_product_accumulate_int16)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int16 : begin
-uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
-int __ovld arm_dot_acc(short2 a, short2 b, int c);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int16 : end
-#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
-
-#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_saturate_int8 : begin
-uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_saturate_int8 : end
-#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-
-// Disable any extensions we may have enabled previously.
-#pragma OPENCL EXTENSION all : disable
-
-#undef __cnfn
-#undef __ovld
-#endif //_OPENCL_H_
diff --git a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math.h b/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math.h
deleted file mode 100644
index 5d7ce9a..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*===---- __clang_openmp_math.h - OpenMP target math support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if defined(__NVPTX__) && defined(_OPENMP)
-/// TODO:
-/// We are currently reusing the functionality of the Clang-CUDA code path
-/// as an alternative to the host declarations provided by math.h and cmath.
-/// This is suboptimal.
-///
-/// We should instead declare the device functions in a similar way, e.g.,
-/// through OpenMP 5.0 variants, and afterwards populate the module with the
-/// host declarations by unconditionally including the host math.h or cmath,
-/// respectively. This is actually what the Clang-CUDA code path does, using
-/// __device__ instead of variants to avoid redeclarations and get the desired
-/// overload resolution.
-
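-/* Editorial note on the resulting include flow (inferred from this file and
- * the math.h/cmath wrappers below): those wrappers include this file first;
- * on an NVPTX OpenMP device pass it pulls in the CUDA device overloads and
- * defines __CLANG_NO_HOST_MATH__, which makes the wrapper skip the
- * include_next of the host header for that one inclusion. */
-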
-#define __CUDA__
-
-#if defined(__cplusplus)
-  #include <__clang_cuda_cmath.h>
-#endif
-
-#undef __CUDA__
-
-/// Magic macro for stopping the math.h/cmath host header from being included.
-#define __CLANG_NO_HOST_MATH__
-
-#endif
-
diff --git a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h b/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
deleted file mode 100644
index a422c98..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*===---- __clang_openmp_math_declares.h - OpenMP math declares ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_OPENMP_MATH_DECLARES_H__
-#define __CLANG_OPENMP_MATH_DECLARES_H__
-
-#ifndef _OPENMP
-#error "This file is for OpenMP compilation only."
-#endif
-
-#if defined(__NVPTX__) && defined(_OPENMP)
-
-#define __CUDA__
-
-#if defined(__cplusplus)
-  #include <__clang_cuda_math_forward_declares.h>
-#endif
-
-/// Include declarations for libdevice functions.
-#include <__clang_cuda_libdevice_declares.h>
-/// Provide definitions for these functions.
-#include <__clang_cuda_device_functions.h>
-
-#undef __CUDA__
-
-#endif
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/cmath
deleted file mode 100644
index a5183a1..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/cmath
+++ /dev/null
@@ -1,16 +0,0 @@
-/*===-------------- cmath - Alternative cmath header -----------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#include <__clang_openmp_math.h>
-
-#ifndef __CLANG_NO_HOST_MATH__
-#include_next <cmath>
-#else
-#undef __CLANG_NO_HOST_MATH__
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/math.h b/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/math.h
deleted file mode 100644
index d2786ec..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/math.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*===------------- math.h - Alternative math.h header ----------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#include <__clang_openmp_math.h>
-
-#ifndef __CLANG_NO_HOST_MATH__
-#include_next <math.h>
-#else
-#undef __CLANG_NO_HOST_MATH__
-#endif
-
diff --git a/linux-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc
deleted file mode 100644
index 99f41d8..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc
+++ /dev/null
@@ -1,754 +0,0 @@
-/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
-|*
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-|* See https://llvm.org/LICENSE.txt for license information.
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
-\*===----------------------------------------------------------------------===*/
-/*
- * This is the master file that defines all the data structures, signatures,
- * and constant literals that are shared across the profiling runtime library,
- * the compiler (instrumentation), and the host tools (reader/writer). The
- * entities defined in this file affect the profile runtime ABI, the raw
- * profile format, or both.
- *
- * The file has two identical copies. The master copy lives in LLVM and
- * the other one sits in the compiler-rt/lib/profile directory. To make
- * changes in this file, first modify the master copy and then copy it over
- * to compiler-rt. Testing of any change in this file can start only after
- * the two copies are synced up.
- *
- * The first part of the file includes macros that define the types, names,
- * and initializers for the member fields of the core data structures. The
- * field declarations for one structure are enabled by defining the field
- * activation macro associated with that structure. Only one field activation
- * macro can be defined at a time; the remaining definitions are filtered out
- * by the preprocessor.
- *
- * Examples of how the template is used to instantiate structure definitions:
- * 1. To declare a structure:
- *
- * struct ProfData {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *    Type Name;
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 2. To construct LLVM type arrays for the struct type:
- *
- * Type *DataTypes[] = {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   LLVMType,
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 3. To construct a constant array for the initializers:
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   Initializer,
- * Constant *ConstantVals[] = {
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- *
- * The second part of the file includes definitions of all other entities
- * related to the runtime ABI and format. When no field activation macro is
- * defined, this file can be included to introduce those definitions.
- *
-\*===----------------------------------------------------------------------===*/
-
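-/* Editorial sketch of usage pattern 1 above, roughly as the compiler-rt
- * runtime stamps out its control structure (field list implied by the
- * macros below; illustrative, not a verbatim copy of compiler-rt):
- *
- *   typedef struct __llvm_profile_data {
- *   #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) Type Name;
- *   #include "InstrProfData.inc"
- *   } __llvm_profile_data;
- */
-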
-/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
- * the compiler runtime. */
-#ifndef INSTR_PROF_VISIBILITY
-#define INSTR_PROF_VISIBILITY
-#endif
-
-/* INSTR_PROF_DATA start. */
-/* Definition of member fields of the per-function control structure. */
-#ifndef INSTR_PROF_DATA
-#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                Inc->getHash()->getZExtValue()))
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt64PtrTy(Ctx), CounterPtr, \
-                ConstantExpr::getBitCast(CounterPtr, \
-                llvm::Type::getInt64PtrTy(Ctx)))
-/* This is used to map function pointers for the indirect call targets to
- * function name hashes during the conversion from raw to merged profile
- * data.
- */
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
-                FunctionAddr)
-INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
-                ValuesPtrExpr)
-INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
-                ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
-INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
-                ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
-#undef INSTR_PROF_DATA
-/* INSTR_PROF_DATA end. */
-
-
-/* This is an internal data structure used by the value profiler. It
- * is defined here so that LLVM's serialization code can be shared and
- * reused in unit tests.
- *
- * typedef struct ValueProfNode {
- *   // InstrProfValueData VData;
- *   uint64_t Value;
- *   uint64_t Count;
- *   struct ValueProfNode *Next;
- * } ValueProfNode;
- */
-/* INSTR_PROF_VALUE_NODE start. */
-#ifndef INSTR_PROF_VALUE_NODE
-#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
-                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
-#undef INSTR_PROF_VALUE_NODE
-/* INSTR_PROF_VALUE_NODE end. */
-
-/* INSTR_PROF_RAW_HEADER  start */
-/* Definition of member fields of the raw profile header data structure. */
-#ifndef INSTR_PROF_RAW_HEADER
-#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
-INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
-INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, (uintptr_t)CountersBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
-#undef INSTR_PROF_RAW_HEADER
-/* INSTR_PROF_RAW_HEADER  end */
-
-/* VALUE_PROF_FUNC_PARAM start */
-/* Definition of parameter types of the runtime API used to do value profiling
- * for a given value site.
- */
-#ifndef VALUE_PROF_FUNC_PARAM
-#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
-#define INSTR_PROF_COMMA
-#else
-#define INSTR_PROF_DATA_DEFINED
-#define INSTR_PROF_COMMA ,
-#endif
-VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
-#ifndef VALUE_RANGE_PROF
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
-#else /* VALUE_RANGE_PROF */
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
-#endif /*VALUE_RANGE_PROF */
-#undef VALUE_PROF_FUNC_PARAM
-#undef INSTR_PROF_COMMA
-/* VALUE_PROF_FUNC_PARAM end */
-
-/* VALUE_PROF_KIND start */
-#ifndef VALUE_PROF_KIND
-#define VALUE_PROF_KIND(Enumerator, Value, Descr)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-/* For indirect function call value profiling, the addresses of the target
- * functions are profiled by the instrumented code. The target addresses are
- * written into the raw profile data and converted to the target function
- * name's MD5 hash by the profile reader during deserialization. Typically,
- * this happens when the raw profile data is read during profile merging.
- *
- * For this remapping, the ProfData is used: it contains both the function
- * name hash and the function address.
- */
-VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
-/* For memory intrinsic functions size profiling. */
-VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
-/* These two kinds must be the last to be
- * declared. This is to make sure the string
- * array created with the template can be
- * indexed with the kind value.
- */
-VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
-VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
-
-#undef VALUE_PROF_KIND
-/* VALUE_PROF_KIND end */
-
-/* COVMAP_FUNC_RECORD start */
-/* Definition of member fields of the function record structure in coverage
- * map.
- */
-#ifndef COVMAP_FUNC_RECORD
-#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-#ifdef COVMAP_V1
-COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
-                   NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
-                   llvm::Type::getInt8PtrTy(Ctx)))
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
-                   NameValue.size()))
-#else
-COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                   llvm::IndexedInstrProf::ComputeHash(NameValue)))
-#endif
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
-                   CoverageMapping.size()))
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
-#undef COVMAP_FUNC_RECORD
-/* COVMAP_FUNC_RECORD end.  */
-
-/* COVMAP_HEADER start */
-/* Definition of member fields of coverage map header.
- */
-#ifndef COVMAP_HEADER
-#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
-              llvm::ConstantInt::get(Int32Ty,  FunctionRecords.size()))
-COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
-              llvm::ConstantInt::get(Int32Ty, FilenamesSize))
-COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
-              llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
-COVMAP_HEADER(uint32_t, Int32Ty, Version, \
-              llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
-#undef COVMAP_HEADER
-/* COVMAP_HEADER end.  */
-
-
-#ifdef INSTR_PROF_SECT_ENTRY
-#define INSTR_PROF_DATA_DEFINED
-INSTR_PROF_SECT_ENTRY(IPSK_data, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
-                      INSTR_PROF_DATA_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
-                      INSTR_PROF_CNTS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_name, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
-                      INSTR_PROF_NAME_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vals, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
-                      INSTR_PROF_VALS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
-                      INSTR_PROF_VNODES_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
-                      INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
-
-#undef INSTR_PROF_SECT_ENTRY
-#endif
-
-
-#ifdef INSTR_PROF_VALUE_PROF_DATA
-#define INSTR_PROF_DATA_DEFINED
-
-#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
-/*!
- * This is the header of the data structure that defines the on-disk
- * layout of the value profile data of a particular kind for one function.
- */
-typedef struct ValueProfRecord {
-  /* The kind of the value profile record. */
-  uint32_t Kind;
-  /*
-   * The number of value profile sites. It is guaranteed to be non-zero;
-   * otherwise the record for this kind won't be emitted.
-   */
-  uint32_t NumValueSites;
-  /*
-   * The first element of the array that stores the number of profiled
-   * values for each value site. The size of the array is NumValueSites.
-   * Since NumValueSites is greater than zero, there is at least one
-   * element in the array.
-   */
-  uint8_t SiteCountArray[1];
-
-  /*
-   * The fake declaration below is for documentation purposes only; the
-   * padding aligns the start of the next field to an 8-byte boundary.
-  uint8_t Padding[X];
-   */
-
-  /* The array of value profile data. The size of the array is the sum
-   * of all elements in SiteCountArray[].
-  InstrProfValueData ValueData[];
-   */
-
-#ifdef __cplusplus
-  /*!
-   * Return the number of value sites.
-   */
-  uint32_t getNumValueSites() const { return NumValueSites; }
-  /*!
-   * Read data from this record and save it to Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  /*
-   * In-place byte swap:
-   * Do byte swap for this instance. \c Old is the original byte order before
-   * the swap, and \c New is the new byte order.
-   */
-  void swapBytes(support::endianness Old, support::endianness New);
-#endif
-} ValueProfRecord;
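-/* Editor's note (not part of the original header): a worked layout example.
- * A record with NumValueSites == 2 and SiteCountArray == {1, 2} occupies
- * Kind (4) + NumValueSites (4) + SiteCountArray (2) = 10 bytes, padded to 16,
- * followed by 1 + 2 = 3 InstrProfValueData entries of 16 bytes each:
- * 16 + 48 = 64 bytes on disk in total. */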
-
-/*!
- * Per-function header/control data structure for value profiling
- * data in indexed format.
- */
-typedef struct ValueProfData {
-  /*
-   * Total size in bytes including this field. It must be a multiple
-   * of sizeof(uint64_t).
-   */
-  uint32_t TotalSize;
-  /*
-   * The number of value profile kinds that have value profile data.
-   * In this implementation, a value profile kind is considered to
-   * have profile data if the number of value profile sites for the
-   * kind is not zero. More aggressively, the implementation can
-   * choose to check the actual data value: if none of the value sites
-   * has any profiled values, the kind can be skipped.
-   */
-  uint32_t NumValueKinds;
-
-  /*
-   * What follows is a sequence of variable-length records. The prefix/header
-   * of each record is defined by the ValueProfRecord type. The number of
-   * records is NumValueKinds.
-   * ValueProfRecord Record_1;
-   * ...
-   * ValueProfRecord Record_N;
-   */
-
-#if __cplusplus
-  /*!
-   * Return the total size in bytes of the on-disk value profile data
-   * given the data stored in Record.
-   */
-  static uint32_t getSize(const InstrProfRecord &Record);
-  /*!
-   * Return a pointer to a \c ValueProfData instance ready to be streamed.
-   */
-  static std::unique_ptr<ValueProfData>
-  serializeFrom(const InstrProfRecord &Record);
-  /*!
-   * Check the integrity of the record.
-   */
-  Error checkIntegrity();
-  /*!
-   * Return a pointer to a \c ValueProfData instance ready to be read.
-   * All data in the instance are properly byte swapped. The input
-   * data is assumed to be in little endian order.
-   */
-  static Expected<std::unique_ptr<ValueProfData>>
-  getValueProfData(const unsigned char *SrcBuffer,
-                   const unsigned char *const SrcBufferEnd,
-                   support::endianness SrcDataEndianness);
-  /*!
-   * Swap byte order from \c Endianness order to host byte order.
-   */
-  void swapBytesToHost(support::endianness Endianness);
-  /*!
-   * Swap byte order from host byte order to \c Endianness order.
-   */
-  void swapBytesFromHost(support::endianness Endianness);
-  /*!
-   * Return the total size of \c ValueProfData.
-   */
-  uint32_t getSize() const { return TotalSize; }
-  /*!
-   * Read data from this instance and save it to \c Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  void operator delete(void *ptr) { ::operator delete(ptr); }
-#endif
-} ValueProfData;
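-/* Editor's note (not part of the original header): continuing the example
- * above, a function with that single 64-byte record has
- * TotalSize = sizeof(ValueProfData) + 64 = 8 + 64 = 72 bytes, a multiple of
- * sizeof(uint64_t) as required. */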
-
-/*
- * The closure is designed to abstract away two types of value profile data:
- * - InstrProfRecord which is the primary data structure used to
- *   represent profile data in host tools (reader, writer, and profile-use)
- * - value profile runtime data structure suitable to be used by C
- *   runtime library.
- *
- * Both sources of data need to serialize to disk/memory-buffer in common
- * format: ValueProfData. The abstraction allows compiler-rt's raw profiler
- * writer to share the same format and code with indexed profile writer.
- *
- * For documentation of the member methods below, refer to corresponding methods
- * in class InstrProfRecord.
- */
-typedef struct ValueProfRecordClosure {
-  const void *Record;
-  uint32_t (*GetNumValueKinds)(const void *Record);
-  uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
-
-  /*
-   * After extracting the value profile data from the value profile record,
-   * this method is used to map the in-memory value to on-disk value. If
-   * the method is null, value will be written out untranslated.
-   */
-  uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
-  void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
-                          uint32_t S);
-  ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
-} ValueProfRecordClosure;
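-/* Editor's sketch (not part of the original header): one hypothetical way to
- * wire up the closure for a toy in-memory record with a single value kind,
- * one value site, and two profiled values. Every "toy"/"Toy" name below is
- * an illustrative assumption, and calloc requires <stdlib.h>. */
-typedef struct ToyRecord {
-  InstrProfValueData Values[2]; /* two profiled (value, count) pairs */
-} ToyRecord;
-
-static uint32_t toyGetNumValueKinds(const void *R) { (void)R; return 1; }
-static uint32_t toyGetNumValueSites(const void *R, uint32_t VK) {
-  (void)R;
-  return VK == IPVK_First ? 1 : 0;
-}
-static uint32_t toyGetNumValueData(const void *R, uint32_t VK) {
-  (void)R;
-  return VK == IPVK_First ? 2 : 0;
-}
-static uint32_t toyGetNumValueDataForSite(const void *R, uint32_t VK,
-                                          uint32_t S) {
-  (void)R; (void)S;
-  return VK == IPVK_First ? 2 : 0;
-}
-static void toyGetValueForSite(const void *R, InstrProfValueData *Dst,
-                               uint32_t VK, uint32_t S) {
-  const ToyRecord *TR = (const ToyRecord *)R;
-  (void)VK; (void)S;
-  Dst[0] = TR->Values[0]; /* the serializer sized Dst from SiteCountArray */
-  Dst[1] = TR->Values[1];
-}
-static ValueProfData *toyAllocValueProfData(size_t TotalSizeInBytes) {
-  return (ValueProfData *)calloc(1, TotalSizeInBytes); /* zero-filled */
-}
-/* With RemapValueData left NULL the values are written out untranslated:
- *   ToyRecord Toy = {{{42, 7}, {13, 1}}};
- *   ValueProfRecordClosure C = {&Toy, toyGetNumValueKinds,
- *       toyGetNumValueSites, toyGetNumValueData, toyGetNumValueDataForSite,
- *       NULL, toyGetValueForSite, toyAllocValueProfData};
- *   ValueProfData *VPD = serializeValueProfDataFrom(&C, NULL);
- */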
-
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getFirstValueProfRecord(ValueProfData *VPD);
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getValueProfRecordNext(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY InstrProfValueData *
-getValueProfRecordValueData(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfRecordHeaderSize(uint32_t NumValueSites);
-
-#undef INSTR_PROF_VALUE_PROF_DATA
-#endif  /* INSTR_PROF_VALUE_PROF_DATA */
-
-
-#ifdef INSTR_PROF_COMMON_API_IMPL
-#define INSTR_PROF_DATA_DEFINED
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#define INSTR_PROF_NULLPTR nullptr
-#else
-#define INSTR_PROF_INLINE
-#define INSTR_PROF_NULLPTR NULL
-#endif
-
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-
-/*!
- * Return the \c ValueProfRecord header size including the
- * padding bytes.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
-  uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
-                  sizeof(uint8_t) * NumValueSites;
-  /* Round the size to multiple of 8 bytes. */
-  Size = (Size + 7) & ~7;
-  return Size;
-}
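-/* Editor's note (not part of the original header): e.g. NumValueSites == 3
- * gives offsetof(ValueProfRecord, SiteCountArray) + 3 = 8 + 3 = 11, which
- * the rounding above pads to 16 bytes. */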
-
-/*!
- * Return the total size of the value profile record including the
- * header and the value data.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordSize(uint32_t NumValueSites,
-                                uint32_t NumValueData) {
-  return getValueProfRecordHeaderSize(NumValueSites) +
-         sizeof(InstrProfValueData) * NumValueData;
-}
-
-/*!
- * Return the pointer to the start of value data array.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
-  return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
-                                                   This->NumValueSites));
-}
-
-/*!
- * Return the total number of value data for \c This record.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
-  uint32_t NumValueData = 0;
-  uint32_t I;
-  for (I = 0; I < This->NumValueSites; I++)
-    NumValueData += This->SiteCountArray[I];
-  return NumValueData;
-}
-
-/*!
- * Use this method to advance from \c This to the next \c ValueProfRecord.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
-  uint32_t NumValueData = getValueProfRecordNumValueData(This);
-  return (ValueProfRecord *)((char *)This +
-                             getValueProfRecordSize(This->NumValueSites,
-                                                    NumValueData));
-}
-
-/*!
- * Return the first \c ValueProfRecord instance.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
-  return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
-}
-
-/* Closure based interfaces.  */
-
-/*!
- * Return the total size in bytes of the on-disk value profile data
- * given the data stored in Record.
- */
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfDataSize(ValueProfRecordClosure *Closure) {
-  uint32_t Kind;
-  uint32_t TotalSize = sizeof(ValueProfData);
-  const void *Record = Closure->Record;
-
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
-    if (!NumValueSites)
-      continue;
-    TotalSize += getValueProfRecordSize(NumValueSites,
-                                        Closure->GetNumValueData(Record, Kind));
-  }
-  return TotalSize;
-}
-
-/*!
- * Extract value profile data of a function for the profile kind \c ValueKind
- * from the \c Closure and serialize the data into \c This record instance.
- */
-INSTR_PROF_VISIBILITY void
-serializeValueProfRecordFrom(ValueProfRecord *This,
-                             ValueProfRecordClosure *Closure,
-                             uint32_t ValueKind, uint32_t NumValueSites) {
-  uint32_t S;
-  const void *Record = Closure->Record;
-  This->Kind = ValueKind;
-  This->NumValueSites = NumValueSites;
-  InstrProfValueData *DstVD = getValueProfRecordValueData(This);
-
-  for (S = 0; S < NumValueSites; S++) {
-    uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
-    This->SiteCountArray[S] = ND;
-    Closure->GetValueForSite(Record, DstVD, ValueKind, S);
-    DstVD += ND;
-  }
-}
-
-/*!
- * Extract value profile data of a function from the \c Closure and
- * serialize the data into \c DstData if it is not NULL, or otherwise into
- * heap memory allocated by the \c Closure's allocator method. If \c
- * DstData is not null, the caller is expected to have set the TotalSize
- * in \c DstData.
- */
-INSTR_PROF_VISIBILITY ValueProfData *
-serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
-                           ValueProfData *DstData) {
-  uint32_t Kind;
-  uint32_t TotalSize =
-      DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
-
-  ValueProfData *VPD =
-      DstData ? DstData : Closure->AllocValueProfData(TotalSize);
-
-  VPD->TotalSize = TotalSize;
-  VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
-  ValueProfRecord *VR = getFirstValueProfRecord(VPD);
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
-    if (!NumValueSites)
-      continue;
-    serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
-    VR = getValueProfRecordNext(VR);
-  }
-  return VPD;
-}
-
-#undef INSTR_PROF_COMMON_API_IMPL
-#endif /* INSTR_PROF_COMMON_API_IMPL */
-
-/*============================================================================*/
-
-#ifndef INSTR_PROF_DATA_DEFINED
-
-#ifndef INSTR_PROF_DATA_INC
-#define INSTR_PROF_DATA_INC
-
-/* Helper macros.  */
-#define INSTR_PROF_SIMPLE_QUOTE(x) #x
-#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
-#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
-#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
-
-/* Magic number to detect file format and endianness.
- * Use 255 at one end, since no UTF-8 file can use that character.  Avoid 0,
- * so that utilities, like strings, don't grab it as a string.  129 is also
- * invalid UTF-8, and high enough to be interesting.
- * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
- * for 32-bit platforms.
- */
-#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
-#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
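-/* Editor's note (not part of the original header): laid out MSB-first the
- * 64-bit magic is the byte sequence 0xff 'l' 'p' 'r' 'o' 'f' 'r' 0x81, i.e.
- * 0xff6c70726f667281; a reader that finds these bytes reversed knows the
- * profile was written with the opposite endianness. */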
-
-/* Raw profile format version (starts from 1). */
-#define INSTR_PROF_RAW_VERSION 5
-/* Indexed profile format version (starts from 1). */
-#define INSTR_PROF_INDEX_VERSION 5
-/* Coverage mapping format version (starts from 0). */
-#define INSTR_PROF_COVMAP_VERSION 2
-
-/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
- * version for other variants of the profile. We set the lowest bit of the
- * upper 8 bits (i.e. bit 56) to 1 to indicate an IR-level instrumentation
- * generated profile, and to 0 for a Clang FE generated profile.
- * A 1 in bit 57 indicates there are context-sensitive records in the profile.
- */
-#define VARIANT_MASKS_ALL 0xff00000000000000ULL
-#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
-#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
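-/* Editor's note (not part of the original header): e.g. a version word of
- * 0x0100000000000005ULL decodes as GET_VERSION(V) == 5 with
- * VARIANT_MASK_IR_PROF set, i.e. format version 5 of an IR-level
- * instrumentation generated profile. */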
-#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
-#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
-
-/* The variable that holds the name of the profile data
- * specified via command line. */
-#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
-
-/* section name strings common to all targets other
-   than WIN32 */
-#define INSTR_PROF_DATA_COMMON __llvm_prf_data
-#define INSTR_PROF_NAME_COMMON __llvm_prf_names
-#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
-#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
-#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
-#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
-#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
-/* Windows section names. Because these section names contain dollar characters,
- * they must be quoted.
- */
-#define INSTR_PROF_DATA_COFF ".lprfd$M"
-#define INSTR_PROF_NAME_COFF ".lprfn$M"
-#define INSTR_PROF_CNTS_COFF ".lprfc$M"
-#define INSTR_PROF_VALS_COFF ".lprfv$M"
-#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
-#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
-#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
-
-#ifdef _WIN32
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
-#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
-#else
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
-/* Order file instrumentation. */
-#define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
-#endif
-
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR                                   \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR                               \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)
-
-/* Macros to define start/stop section symbol for a given
- * section on Linux. For instance
- * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
- * expand to __start___llvm_prf_data
- */
-#define INSTR_PROF_SECT_START(Sect) \
-        INSTR_PROF_CONCAT(__start_,Sect)
-#define INSTR_PROF_SECT_STOP(Sect) \
-        INSTR_PROF_CONCAT(__stop_,Sect)
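-/* Editor's note (not part of the original header): ELF linkers synthesize
- * __start_<sect>/__stop_<sect> symbols for sections whose names are valid C
- * identifiers, so e.g. INSTR_PROF_SECT_STOP(__llvm_prf_data) expands to
- * __stop___llvm_prf_data and marks the end of the profile data section. */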
-
-/* Value Profiling API linkage name.  */
-#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
-#define INSTR_PROF_VALUE_PROF_FUNC_STR \
-        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
-#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
-#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
-        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
-
-/* InstrProfile per-function control data alignment.  */
-#define INSTR_PROF_DATA_ALIGNMENT 8
-
-/* The data structure that represents a tracked value by the
- * value profiler.
- */
-typedef struct InstrProfValueData {
-  /* Profiled value. */
-  uint64_t Value;
-  /* Number of times the value appears in the training run. */
-  uint64_t Count;
-} InstrProfValueData;
-
-#endif /* INSTR_PROF_DATA_INC */
-
-#ifndef INSTR_ORDER_FILE_INC
-/* The maximum number of functions: 128*1024 (so the buffer size is 128*4 KB). */
-#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
-#define INSTR_ORDER_FILE_BUFFER_BITS 17
-#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
-#endif /* INSTR_ORDER_FILE_INC */
-#else
-#undef INSTR_PROF_DATA_DEFINED
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/vecintrin.h b/linux-x86/lib64/clang/11.0.1/include/vecintrin.h
deleted file mode 100644
index 6f9b609..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/vecintrin.h
+++ /dev/null
@@ -1,10862 +0,0 @@
-/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if defined(__s390x__) && defined(__VEC__)
-
-#define __ATTRS_ai __attribute__((__always_inline__))
-#define __ATTRS_o __attribute__((__overloadable__))
-#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-
-#define __constant(PARM) \
-  __attribute__((__enable_if__ ((PARM) == (PARM), \
-     "argument must be a constant integer")))
-#define __constant_range(PARM, LOW, HIGH) \
-  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
-     "argument must be a constant integer from " #LOW " to " #HIGH)))
-#define __constant_pow2_range(PARM, LOW, HIGH) \
-  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
-                                ((PARM) & ((PARM) - 1)) == 0, \
-     "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
-
-/*-- __lcbb -----------------------------------------------------------------*/
-
-extern __ATTRS_o unsigned int
-__lcbb(const void *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
-  __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
-                           ((Y) == 64 ? 0 : \
-                            (Y) == 128 ? 1 : \
-                            (Y) == 256 ? 2 : \
-                            (Y) == 512 ? 3 : \
-                            (Y) == 1024 ? 4 : \
-                            (Y) == 2048 ? 5 : \
-                            (Y) == 4096 ? 6 : 0) : 0))
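-/* Editor's note (not part of the original header): the ladder above encodes
- * the boundary as log2(Y) - 6, so e.g. __lcbb(ptr, 512) lowers to
- * __builtin_s390_lcbb(ptr, 3); a non-constant or unlisted length falls back
- * to code 0, i.e. a 64-byte boundary. */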
-
-/*-- vec_extract ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai signed char
-vec_extract(vector signed char __vec, int __index) {
-  return __vec[__index & 15];
-}
-
-static inline __ATTRS_o_ai unsigned char
-vec_extract(vector bool char __vec, int __index) {
-  return __vec[__index & 15];
-}
-
-static inline __ATTRS_o_ai unsigned char
-vec_extract(vector unsigned char __vec, int __index) {
-  return __vec[__index & 15];
-}
-
-static inline __ATTRS_o_ai signed short
-vec_extract(vector signed short __vec, int __index) {
-  return __vec[__index & 7];
-}
-
-static inline __ATTRS_o_ai unsigned short
-vec_extract(vector bool short __vec, int __index) {
-  return __vec[__index & 7];
-}
-
-static inline __ATTRS_o_ai unsigned short
-vec_extract(vector unsigned short __vec, int __index) {
-  return __vec[__index & 7];
-}
-
-static inline __ATTRS_o_ai signed int
-vec_extract(vector signed int __vec, int __index) {
-  return __vec[__index & 3];
-}
-
-static inline __ATTRS_o_ai unsigned int
-vec_extract(vector bool int __vec, int __index) {
-  return __vec[__index & 3];
-}
-
-static inline __ATTRS_o_ai unsigned int
-vec_extract(vector unsigned int __vec, int __index) {
-  return __vec[__index & 3];
-}
-
-static inline __ATTRS_o_ai signed long long
-vec_extract(vector signed long long __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector bool long long __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-static inline __ATTRS_o_ai unsigned long long
-vec_extract(vector unsigned long long __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai float
-vec_extract(vector float __vec, int __index) {
-  return __vec[__index & 3];
-}
-#endif
-
-static inline __ATTRS_o_ai double
-vec_extract(vector double __vec, int __index) {
-  return __vec[__index & 1];
-}
-
-/*-- vec_insert -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_insert(signed char __scalar, vector signed char __vec, int __index) {
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector bool char __vec, int __index) {
-  vector unsigned char __newvec = (vector unsigned char)__vec;
-  __newvec[__index & 15] = (unsigned char)__scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) {
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_insert(signed short __scalar, vector signed short __vec, int __index) {
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector bool short __vec, int __index) {
-  vector unsigned short __newvec = (vector unsigned short)__vec;
-  __newvec[__index & 7] = (unsigned short)__scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) {
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_insert(signed int __scalar, vector signed int __vec, int __index) {
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector bool int __vec, int __index) {
-  vector unsigned int __newvec = (vector unsigned int)__vec;
-  __newvec[__index & 3] = __scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) {
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_insert(signed long long __scalar, vector signed long long __vec,
-           int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector bool long long __vec,
-           int __index) {
-  vector unsigned long long __newvec = (vector unsigned long long)__vec;
-  __newvec[__index & 1] = __scalar;
-  return __newvec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert(unsigned long long __scalar, vector unsigned long long __vec,
-           int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_insert(float __scalar, vector float __vec, int __index) {
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_insert(double __scalar, vector double __vec, int __index) {
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-/*-- vec_promote ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_promote(signed char __scalar, int __index) {
-  const vector signed char __zero = (vector signed char)0;
-  vector signed char __vec = __builtin_shufflevector(__zero, __zero,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_promote(unsigned char __scalar, int __index) {
-  const vector unsigned char __zero = (vector unsigned char)0;
-  vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
-    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 15] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_promote(signed short __scalar, int __index) {
-  const vector signed short __zero = (vector signed short)0;
-  vector signed short __vec = __builtin_shufflevector(__zero, __zero,
-                                -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_promote(unsigned short __scalar, int __index) {
-  const vector unsigned short __zero = (vector unsigned short)0;
-  vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
-                                  -1, -1, -1, -1, -1, -1, -1, -1);
-  __vec[__index & 7] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_promote(signed int __scalar, int __index) {
-  const vector signed int __zero = (vector signed int)0;
-  vector signed int __vec = __builtin_shufflevector(__zero, __zero,
-                                                    -1, -1, -1, -1);
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_promote(unsigned int __scalar, int __index) {
-  const vector unsigned int __zero = (vector unsigned int)0;
-  vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
-                                                      -1, -1, -1, -1);
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_promote(signed long long __scalar, int __index) {
-  const vector signed long long __zero = (vector signed long long)0;
-  vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
-                                                          -1, -1);
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_promote(unsigned long long __scalar, int __index) {
-  const vector unsigned long long __zero = (vector unsigned long long)0;
-  vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
-                                                            -1, -1);
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_promote(float __scalar, int __index) {
-  const vector float __zero = (vector float)0.0f;
-  vector float __vec = __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
-  __vec[__index & 3] = __scalar;
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_promote(double __scalar, int __index) {
-  const vector double __zero = (vector double)0.0;
-  vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
-  __vec[__index & 1] = __scalar;
-  return __vec;
-}
-
-/*-- vec_insert_and_zero ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_insert_and_zero(const signed char *__ptr) {
-  vector signed char __vec = (vector signed char)0;
-  __vec[7] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_insert_and_zero(const unsigned char *__ptr) {
-  vector unsigned char __vec = (vector unsigned char)0;
-  __vec[7] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_insert_and_zero(const signed short *__ptr) {
-  vector signed short __vec = (vector signed short)0;
-  __vec[3] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_insert_and_zero(const unsigned short *__ptr) {
-  vector unsigned short __vec = (vector unsigned short)0;
-  __vec[3] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_insert_and_zero(const signed int *__ptr) {
-  vector signed int __vec = (vector signed int)0;
-  __vec[1] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_insert_and_zero(const unsigned int *__ptr) {
-  vector unsigned int __vec = (vector unsigned int)0;
-  __vec[1] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_insert_and_zero(const signed long long *__ptr) {
-  vector signed long long __vec = (vector signed long long)0;
-  __vec[0] = *__ptr;
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_insert_and_zero(const unsigned long long *__ptr) {
-  vector unsigned long long __vec = (vector unsigned long long)0;
-  __vec[0] = *__ptr;
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_insert_and_zero(const float *__ptr) {
-  vector float __vec = (vector float)0.0f;
-  __vec[1] = *__ptr;
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_insert_and_zero(const double *__ptr) {
-  vector double __vec = (vector double)0.0;
-  __vec[0] = *__ptr;
-  return __vec;
-}
-
-/*-- vec_perm ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_perm(vector signed char __a, vector signed char __b,
-         vector unsigned char __c) {
-  return (vector signed char)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-  return (vector unsigned char)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_perm(vector bool char __a, vector bool char __b,
-         vector unsigned char __c) {
-  return (vector bool char)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_perm(vector signed short __a, vector signed short __b,
-         vector unsigned char __c) {
-  return (vector signed short)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c) {
-  return (vector unsigned short)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_perm(vector bool short __a, vector bool short __b,
-         vector unsigned char __c) {
-  return (vector bool short)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_perm(vector signed int __a, vector signed int __b,
-         vector unsigned char __c) {
-  return (vector signed int)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_perm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned char __c) {
-  return (vector unsigned int)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_perm(vector bool int __a, vector bool int __b,
-         vector unsigned char __c) {
-  return (vector bool int)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c) {
-  return (vector signed long long)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c) {
-  return (vector unsigned long long)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c) {
-  return (vector bool long long)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_perm(vector float __a, vector float __b,
-         vector unsigned char __c) {
-  return (vector float)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_perm(vector double __a, vector double __b,
-         vector unsigned char __c) {
-  return (vector double)__builtin_s390_vperm(
-           (vector unsigned char)__a, (vector unsigned char)__b, __c);
-}
-
-/*-- vec_permi --------------------------------------------------------------*/
-
-// This prototype is deprecated.
-extern __ATTRS_o vector signed long long
-vec_permi(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector unsigned long long
-vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector bool long long
-vec_permi(vector bool long long __a, vector bool long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_permi(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 3);
-
-#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
-  __builtin_s390_vpdi((vector unsigned long long)(X), \
-                      (vector unsigned long long)(Y), \
-                      (((Z) & 2) << 1) | ((Z) & 1)))
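-// Editor's note (not part of the original header): the expression above maps
-// __c in 0..3 onto the VPDI element-selector bits (values 4 and 1), so e.g.
-// vec_permi(__a, __b, 2) lowers to __builtin_s390_vpdi(__a, __b, 4) and
-// yields { __a[1], __b[0] }.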
-
-/*-- vec_bperm_u128 ---------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned long long
-vec_bperm_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vbperm(__a, __b);
-}
-#endif
-
-/*-- vec_revb ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_revb(vector signed short __vec) {
-  return (vector signed short)
-         __builtin_s390_vlbrh((vector unsigned short)__vec);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_revb(vector unsigned short __vec) {
-  return __builtin_s390_vlbrh(__vec);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_revb(vector signed int __vec) {
-  return (vector signed int)
-         __builtin_s390_vlbrf((vector unsigned int)__vec);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_revb(vector unsigned int __vec) {
-  return __builtin_s390_vlbrf(__vec);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_revb(vector signed long long __vec) {
-  return (vector signed long long)
-         __builtin_s390_vlbrg((vector unsigned long long)__vec);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_revb(vector unsigned long long __vec) {
-  return __builtin_s390_vlbrg(__vec);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_revb(vector float __vec) {
-  return (vector float)
-         __builtin_s390_vlbrf((vector unsigned int)__vec);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_revb(vector double __vec) {
-  return (vector double)
-         __builtin_s390_vlbrg((vector unsigned long long)__vec);
-}
-
-/*-- vec_reve ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_reve(vector signed char __vec) {
-  return (vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
-                                __vec[11], __vec[10], __vec[9], __vec[8],
-                                __vec[7], __vec[6], __vec[5], __vec[4],
-                                __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __vec) {
-  return (vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
-                                  __vec[11], __vec[10], __vec[9], __vec[8],
-                                  __vec[7], __vec[6], __vec[5], __vec[4],
-                                  __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_reve(vector bool char __vec) {
-  return (vector bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
-                              __vec[11], __vec[10], __vec[9], __vec[8],
-                              __vec[7], __vec[6], __vec[5], __vec[4],
-                              __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __vec) {
-  return (vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
-                                 __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __vec) {
-  return (vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
-                                   __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_reve(vector bool short __vec) {
-  return (vector bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
-                               __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_reve(vector signed int __vec) {
-  return (vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __vec) {
-  return (vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_reve(vector bool int __vec) {
-  return (vector bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __vec) {
-  return (vector signed long long) { __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __vec) {
-  return (vector unsigned long long) { __vec[1], __vec[0] };
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __vec) {
-  return (vector bool long long) { __vec[1], __vec[0] };
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_reve(vector float __vec) {
-  return (vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_reve(vector double __vec) {
-  return (vector double) { __vec[1], __vec[0] };
-}
-
-/*-- vec_sel ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b,
-        vector unsigned char __c) {
-  return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector unsigned char __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector bool char __c) {
-  return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
-        vector unsigned short __c) {
-  return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sel(vector signed short __a, vector signed short __b,
-        vector bool short __c) {
-  return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b,
-        vector unsigned short __c) {
-  return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector unsigned short __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector bool short __c) {
-  return (((vector unsigned short)__c & __b) |
-          (~(vector unsigned short)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b,
-        vector unsigned int __c) {
-  return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) {
-  return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b,
-        vector unsigned int __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
-        vector unsigned long long __c) {
-  return (((vector signed long long)__c & __b) |
-          (~(vector signed long long)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sel(vector signed long long __a, vector signed long long __b,
-        vector bool long long __c) {
-  return (((vector signed long long)__c & __b) |
-          (~(vector signed long long)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
-        vector unsigned long long __c) {
-  return (((vector bool long long)__c & __b) |
-          (~(vector bool long long)__c & __a));
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_sel(vector bool long long __a, vector bool long long __b,
-        vector bool long long __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
-        vector unsigned long long __c) {
-  return (__c & __b) | (~__c & __a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sel(vector unsigned long long __a, vector unsigned long long __b,
-        vector bool long long __c) {
-  return (((vector unsigned long long)__c & __b) |
-          (~(vector unsigned long long)__c & __a));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector unsigned int __c) {
-  return (vector float)((__c & (vector unsigned int)__b) |
-                        (~__c & (vector unsigned int)__a));
-}
-
-static inline __ATTRS_o_ai vector float
-vec_sel(vector float __a, vector float __b, vector bool int __c) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  vector unsigned int __bc = (vector unsigned int)__b;
-  vector unsigned int __cc = (vector unsigned int)__c;
-  return (vector float)((__cc & __bc) | (~__cc & __ac));
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
-  return (vector double)((__c & (vector unsigned long long)__b) |
-                         (~__c & (vector unsigned long long)__a));
-}
-
-static inline __ATTRS_o_ai vector double
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
-  vector unsigned long long __ac = (vector unsigned long long)__a;
-  vector unsigned long long __bc = (vector unsigned long long)__b;
-  vector unsigned long long __cc = (vector unsigned long long)__c;
-  return (vector double)((__cc & __bc) | (~__cc & __ac));
-}
-
-/*-- vec_gather_element -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed int
-vec_gather_element(vector signed int __vec, vector unsigned int __offset,
-                   const signed int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const signed int *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_gather_element(vector bool int __vec, vector unsigned int __offset,
-                   const unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const unsigned int *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_gather_element(vector unsigned int __vec, vector unsigned int __offset,
-                   const unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const unsigned int *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_gather_element(vector signed long long __vec,
-                   vector unsigned long long __offset,
-                   const signed long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const signed long long *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_gather_element(vector bool long long __vec,
-                   vector unsigned long long __offset,
-                   const unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const unsigned long long *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gather_element(vector unsigned long long __vec,
-                   vector unsigned long long __offset,
-                   const unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const unsigned long long *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_gather_element(vector float __vec, vector unsigned int __offset,
-                   const float *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  __vec[__index] = *(const float *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_gather_element(vector double __vec, vector unsigned long long __offset,
-                   const double *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  __vec[__index] = *(const double *)(
-    (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]);
-  return __vec;
-}
-
-/*-- vec_scatter_element ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed int __vec, vector unsigned int __offset,
-                    signed int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool int __vec, vector unsigned int __offset,
-                    unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset,
-                    unsigned int *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector signed long long __vec,
-                    vector unsigned long long __offset,
-                    signed long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector bool long long __vec,
-                    vector unsigned long long __offset,
-                    unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector unsigned long long __vec,
-                    vector unsigned long long __offset,
-                    unsigned long long *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector float __vec, vector unsigned int __offset,
-                    float *__ptr, int __index)
-  __constant_range(__index, 0, 3) {
-  *(float *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-#endif
-
-static inline __ATTRS_o_ai void
-vec_scatter_element(vector double __vec, vector unsigned long long __offset,
-                    double *__ptr, int __index)
-  __constant_range(__index, 0, 1) {
-  *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) =
-    __vec[__index];
-}
-
-/*-- vec_xl -----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_xl(long __offset, const signed char *__ptr) {
-  return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_xl(long __offset, const unsigned char *__ptr) {
-  return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_xl(long __offset, const signed short *__ptr) {
-  return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_xl(long __offset, const unsigned short *__ptr) {
-  return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_xl(long __offset, const signed int *__ptr) {
-  return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_xl(long __offset, const unsigned int *__ptr) {
-  return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_xl(long __offset, const signed long long *__ptr) {
-  return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(long __offset, const unsigned long long *__ptr) {
-  return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_xl(long __offset, const float *__ptr) {
-  return *(const vector float *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_xl(long __offset, const double *__ptr) {
-  return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-/*-- vec_xld2 ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_xld2(long __offset, const signed char *__ptr) {
-  return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_xld2(long __offset, const unsigned char *__ptr) {
-  return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_xld2(long __offset, const signed short *__ptr) {
-  return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_xld2(long __offset, const unsigned short *__ptr) {
-  return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_xld2(long __offset, const signed int *__ptr) {
-  return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_xld2(long __offset, const unsigned int *__ptr) {
-  return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_xld2(long __offset, const signed long long *__ptr) {
-  return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xld2(long __offset, const unsigned long long *__ptr) {
-  return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_xld2(long __offset, const double *__ptr) {
-  return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-/*-- vec_xlw4 ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_xlw4(long __offset, const signed char *__ptr) {
-  return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_xlw4(long __offset, const unsigned char *__ptr) {
-  return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_xlw4(long __offset, const signed short *__ptr) {
-  return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_xlw4(long __offset, const unsigned short *__ptr) {
-  return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_xlw4(long __offset, const signed int *__ptr) {
-  return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_xlw4(long __offset, const unsigned int *__ptr) {
-  return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset);
-}
-
-/*-- vec_xst ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed char __vec, long __offset, signed char *__ptr) {
-  *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
-  *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed short __vec, long __offset, signed short *__ptr) {
-  *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
-  *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed int __vec, long __offset, signed int *__ptr) {
-  *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
-  *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector signed long long __vec, long __offset,
-        signed long long *__ptr) {
-  *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void
-vec_xst(vector unsigned long long __vec, long __offset,
-        unsigned long long *__ptr) {
-  *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai void
-vec_xst(vector float __vec, long __offset, float *__ptr) {
-  *(vector float *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-#endif
-
-static inline __ATTRS_o_ai void
-vec_xst(vector double __vec, long __offset, double *__ptr) {
-  *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
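-// Usage sketch (illustrative, not part of the upstream header; assumes an
-// s390x target compiled with -mzvector): vec_xl and vec_xst transfer a full
-// 16-byte vector at a byte offset that need not be aligned.
-//
-//   void add4(const int *a, const int *b, int *out) {
-//     vector signed int va = vec_xl(0, a);   // 4 ints from a + 0 bytes
-//     vector signed int vb = vec_xl(0, b);
-//     vec_xst(va + vb, 0, out);              // element-wise sums
-//   }
-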
-/*-- vec_xstd2 --------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) {
-  *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
-  *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) {
-  *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
-  *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) {
-  *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
-  *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector signed long long __vec, long __offset,
-          signed long long *__ptr) {
-  *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector unsigned long long __vec, long __offset,
-          unsigned long long *__ptr) {
-  *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) =
-    __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstd2(vector double __vec, long __offset, double *__ptr) {
-  *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-/*-- vec_xstw4 --------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) {
-  *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) {
-  *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) {
-  *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) {
-  *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) {
-  *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai void
-vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) {
-  *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec;
-}
-
-/*-- vec_load_bndry ---------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_load_bndry(const signed char *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned char
-vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector signed short
-vec_load_bndry(const signed short *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned short
-vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector signed int
-vec_load_bndry(const signed int *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned int
-vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector signed long long
-vec_load_bndry(const signed long long *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-extern __ATTRS_o vector unsigned long long
-vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-#if __ARCH__ >= 12
-extern __ATTRS_o vector float
-vec_load_bndry(const float *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-#endif
-
-extern __ATTRS_o vector double
-vec_load_bndry(const double *__ptr, unsigned short __len)
-  __constant_pow2_range(__len, 64, 4096);
-
-#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
-  __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
-                            (Y) == 128 ? 1 : \
-                            (Y) == 256 ? 2 : \
-                            (Y) == 512 ? 3 : \
-                            (Y) == 1024 ? 4 : \
-                            (Y) == 2048 ? 5 : \
-                            (Y) == 4096 ? 6 : -1)))
-
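-// Usage sketch (illustrative): vec_load_bndry loads only the bytes that lie
-// before the next block boundary of the given size, so it can peek at the
-// start of a string without risking a fault on the following page. The
-// length must be a constant power of two between 64 and 4096; the macro
-// above encodes it into the mask field of the VLBB instruction.
-//
-//   vector unsigned char head = vec_load_bndry(s, 4096);
-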
-/*-- vec_load_len -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_load_len(const signed char *__ptr, unsigned int __len) {
-  return (vector signed char)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_load_len(const unsigned char *__ptr, unsigned int __len) {
-  return (vector unsigned char)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_load_len(const signed short *__ptr, unsigned int __len) {
-  return (vector signed short)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_load_len(const unsigned short *__ptr, unsigned int __len) {
-  return (vector unsigned short)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_load_len(const signed int *__ptr, unsigned int __len) {
-  return (vector signed int)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_load_len(const unsigned int *__ptr, unsigned int __len) {
-  return (vector unsigned int)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_load_len(const signed long long *__ptr, unsigned int __len) {
-  return (vector signed long long)__builtin_s390_vll(__len, __ptr);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
-  return (vector unsigned long long)__builtin_s390_vll(__len, __ptr);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_load_len(const float *__ptr, unsigned int __len) {
-  return (vector float)__builtin_s390_vll(__len, __ptr);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_load_len(const double *__ptr, unsigned int __len) {
-  return (vector double)__builtin_s390_vll(__len, __ptr);
-}
-
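-// Usage sketch (illustrative): vec_load_len reads only the low
-// min(__len + 1, 16) bytes from memory and zero-fills the remainder of the
-// result, which makes partial loads at the end of a buffer safe:
-//
-//   // Load the trailing n bytes (1 <= n <= 16) starting at p.
-//   vector unsigned char tail = vec_load_len(p, n - 1);
-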
-/*-- vec_load_len_r ---------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector unsigned char
-vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
-  return (vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
-}
-#endif
-
-/*-- vec_store_len ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed char __vec, signed char *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned char __vec, unsigned char *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed short __vec, signed short *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned short __vec, unsigned short *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed int __vec, signed int *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned int __vec, unsigned int *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector signed long long __vec, signed long long *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai void
-vec_store_len(vector float __vec, float *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-#endif
-
-static inline __ATTRS_o_ai void
-vec_store_len(vector double __vec, double *__ptr,
-              unsigned int __len) {
-  __builtin_s390_vstl((vector signed char)__vec, __len, __ptr);
-}
-
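-// Usage sketch (illustrative): vec_store_len is the mirror operation,
-// writing only the low min(__len + 1, 16) bytes and leaving the rest of
-// memory untouched:
-//
-//   vec_store_len(tail, p, n - 1);   // store exactly n bytes
-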
-/*-- vec_store_len_r --------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai void
-vec_store_len_r(vector unsigned char __vec, unsigned char *__ptr,
-                unsigned int __len) {
-  __builtin_s390_vstrl((vector signed char)__vec, __len, __ptr);
-}
-#endif
-
-/*-- vec_load_pair ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed long long
-vec_load_pair(signed long long __a, signed long long __b) {
-  return (vector signed long long)(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_load_pair(unsigned long long __a, unsigned long long __b) {
-  return (vector unsigned long long)(__a, __b);
-}
-
-/*-- vec_genmask ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_genmask(unsigned short __mask)
-  __constant(__mask) {
-  return (vector unsigned char)(
-    __mask & 0x8000 ? 0xff : 0,
-    __mask & 0x4000 ? 0xff : 0,
-    __mask & 0x2000 ? 0xff : 0,
-    __mask & 0x1000 ? 0xff : 0,
-    __mask & 0x0800 ? 0xff : 0,
-    __mask & 0x0400 ? 0xff : 0,
-    __mask & 0x0200 ? 0xff : 0,
-    __mask & 0x0100 ? 0xff : 0,
-    __mask & 0x0080 ? 0xff : 0,
-    __mask & 0x0040 ? 0xff : 0,
-    __mask & 0x0020 ? 0xff : 0,
-    __mask & 0x0010 ? 0xff : 0,
-    __mask & 0x0008 ? 0xff : 0,
-    __mask & 0x0004 ? 0xff : 0,
-    __mask & 0x0002 ? 0xff : 0,
-    __mask & 0x0001 ? 0xff : 0);
-}
-
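-// Usage sketch (illustrative): each bit of the 16-bit constant controls one
-// byte of the result, most significant bit first, e.g.
-//   vec_genmask(0x8001) == (vector unsigned char)
-//     {0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff}
-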
-/*-- vec_genmasks_* ---------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_genmasks_8(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 7;
-  unsigned char __bit2 = __last & 7;
-  unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
-  unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
-  unsigned char __value = (__bit1 <= __bit2 ?
-                           __mask1 & ~__mask2 :
-                           __mask1 | ~__mask2);
-  return (vector unsigned char)__value;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_genmasks_16(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 15;
-  unsigned char __bit2 = __last & 15;
-  unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
-  unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
-  unsigned short __value = (__bit1 <= __bit2 ?
-                            __mask1 & ~__mask2 :
-                            __mask1 | ~__mask2);
-  return (vector unsigned short)__value;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_genmasks_32(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 31;
-  unsigned char __bit2 = __last & 31;
-  unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
-  unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
-  unsigned int __value = (__bit1 <= __bit2 ?
-                          __mask1 & ~__mask2 :
-                          __mask1 | ~__mask2);
-  return (vector unsigned int)__value;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_genmasks_64(unsigned char __first, unsigned char __last)
-  __constant(__first) __constant(__last) {
-  unsigned char __bit1 = __first & 63;
-  unsigned char __bit2 = __last & 63;
-  unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
-  unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
-  unsigned long long __value = (__bit1 <= __bit2 ?
-                                __mask1 & ~__mask2 :
-                                __mask1 | ~__mask2);
-  return (vector unsigned long long)__value;
-}
-
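-// Usage sketch (illustrative): bits are numbered from the most significant
-// bit downward, and each element gets bits __first..__last set (the range
-// wraps around if __first > __last). For example:
-//   vec_genmasks_32(8, 15)   // every element == 0x00ff0000
-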
-/*-- vec_splat --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_splat(vector signed char __vec, int __index)
-  __constant_range(__index, 0, 15) {
-  return (vector signed char)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_splat(vector bool char __vec, int __index)
-  __constant_range(__index, 0, 15) {
-  return (vector bool char)(vector unsigned char)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_splat(vector unsigned char __vec, int __index)
-  __constant_range(__index, 0, 15) {
-  return (vector unsigned char)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_splat(vector signed short __vec, int __index)
-  __constant_range(__index, 0, 7) {
-  return (vector signed short)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_splat(vector bool short __vec, int __index)
-  __constant_range(__index, 0, 7) {
-  return (vector bool short)(vector unsigned short)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_splat(vector unsigned short __vec, int __index)
-  __constant_range(__index, 0, 7) {
-  return (vector unsigned short)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_splat(vector signed int __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector signed int)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_splat(vector bool int __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector bool int)(vector unsigned int)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_splat(vector unsigned int __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector unsigned int)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_splat(vector signed long long __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector signed long long)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_splat(vector bool long long __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector bool long long)(vector unsigned long long)__vec[__index];
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_splat(vector unsigned long long __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector unsigned long long)__vec[__index];
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_splat(vector float __vec, int __index)
-  __constant_range(__index, 0, 3) {
-  return (vector float)__vec[__index];
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_splat(vector double __vec, int __index)
-  __constant_range(__index, 0, 1) {
-  return (vector double)__vec[__index];
-}
-
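-// Usage sketch (illustrative): vec_splat broadcasts the element selected by
-// a constant index across the whole vector:
-//
-//   vector signed int v = (vector signed int){10, 20, 30, 40};
-//   vector signed int s = vec_splat(v, 2);   // {30, 30, 30, 30}
-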
-/*-- vec_splat_s* -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector signed char
-vec_splat_s8(signed char __scalar)
-  __constant(__scalar) {
-  return (vector signed char)__scalar;
-}
-
-static inline __ATTRS_ai vector signed short
-vec_splat_s16(signed short __scalar)
-  __constant(__scalar) {
-  return (vector signed short)__scalar;
-}
-
-static inline __ATTRS_ai vector signed int
-vec_splat_s32(signed short __scalar)
-  __constant(__scalar) {
-  return (vector signed int)(signed int)__scalar;
-}
-
-static inline __ATTRS_ai vector signed long long
-vec_splat_s64(signed short __scalar)
-  __constant(__scalar) {
-  return (vector signed long long)(signed long)__scalar;
-}
-
-/*-- vec_splat_u* -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_splat_u8(unsigned char __scalar)
-  __constant(__scalar) {
-  return (vector unsigned char)__scalar;
-}
-
-static inline __ATTRS_ai vector unsigned short
-vec_splat_u16(unsigned short __scalar)
-  __constant(__scalar) {
-  return (vector unsigned short)__scalar;
-}
-
-static inline __ATTRS_ai vector unsigned int
-vec_splat_u32(signed short __scalar)
-  __constant(__scalar) {
-  return (vector unsigned int)(signed int)__scalar;
-}
-
-static inline __ATTRS_ai vector unsigned long long
-vec_splat_u64(signed short __scalar)
-  __constant(__scalar) {
-  return (vector unsigned long long)(signed long long)__scalar;
-}
-
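-// Note: the 32- and 64-bit variants above deliberately take a signed short
-// argument -- the splat constant must fit in 16 bits and is sign-extended
-// to the element width, matching the 16-bit immediate of the VECTOR
-// REPLICATE IMMEDIATE instruction these typically lower to.
-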
-/*-- vec_splats -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_splats(signed char __scalar) {
-  return (vector signed char)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_splats(unsigned char __scalar) {
-  return (vector unsigned char)__scalar;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_splats(signed short __scalar) {
-  return (vector signed short)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_splats(unsigned short __scalar) {
-  return (vector unsigned short)__scalar;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_splats(signed int __scalar) {
-  return (vector signed int)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_splats(unsigned int __scalar) {
-  return (vector unsigned int)__scalar;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_splats(signed long long __scalar) {
-  return (vector signed long long)__scalar;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_splats(unsigned long long __scalar) {
-  return (vector unsigned long long)__scalar;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_splats(float __scalar) {
-  return (vector float)__scalar;
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_splats(double __scalar) {
-  return (vector double)__scalar;
-}
-
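-// Usage sketch (illustrative): unlike the vec_splat_* forms, vec_splats
-// accepts a non-constant scalar:
-//
-//   double x = 2.5;
-//   vector double vd = vec_splats(x);   // {2.5, 2.5}
-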
-/*-- vec_extend_s64 ---------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed char __a) {
-  return (vector signed long long)(__a[7], __a[15]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed short __a) {
-  return (vector signed long long)(__a[3], __a[7]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_extend_s64(vector signed int __a) {
-  return (vector signed long long)(__a[1], __a[3]);
-}
-
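-// Usage sketch (illustrative): vec_extend_s64 sign-extends the rightmost
-// element of each doubleword, e.g. for a vector signed int {a, b, c, d} the
-// result is {(long long)b, (long long)d}.
-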
-/*-- vec_mergeh -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mergeh(vector signed char __a, vector signed char __b) {
-  return (vector signed char)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_mergeh(vector bool char __a, vector bool char __b) {
-  return (vector bool char)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mergeh(vector signed short __a, vector signed short __b) {
-  return (vector signed short)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_mergeh(vector bool short __a, vector bool short __b) {
-  return (vector bool short)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)(
-    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mergeh(vector signed int __a, vector signed int __b) {
-  return (vector signed int)(__a[0], __b[0], __a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_mergeh(vector bool int __a, vector bool int __b) {
-  return (vector bool int)(__a[0], __b[0], __a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)(__a[0], __b[0]);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)(__a[0], __b[0]);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)(__a[0], __b[0]);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergeh(vector float __a, vector float __b) {
-  return (vector float)(__a[0], __b[0], __a[1], __b[1]);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_mergeh(vector double __a, vector double __b) {
-  return (vector double)(__a[0], __b[0]);
-}
-
-/*-- vec_mergel -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mergel(vector signed char __a, vector signed char __b) {
-  return (vector signed char)(
-    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
-    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_mergel(vector bool char __a, vector bool char __b) {
-  return (vector bool char)(
-    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
-    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)(
-    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
-    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mergel(vector signed short __a, vector signed short __b) {
-  return (vector signed short)(
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_mergel(vector bool short __a, vector bool short __b) {
-  return (vector bool short)(
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)(
-    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mergel(vector signed int __a, vector signed int __b) {
-  return (vector signed int)(__a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_mergel(vector bool int __a, vector bool int __b) {
-  return (vector bool int)(__a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mergel(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)(__a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_mergel(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)(__a[1], __b[1]);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)(__a[1], __b[1]);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_mergel(vector float __a, vector float __b) {
-  return (vector float)(__a[2], __b[2], __a[3], __b[3]);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_mergel(vector double __a, vector double __b) {
-  return (vector double)(__a[1], __b[1]);
-}
-
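-// Usage sketch (illustrative): vec_mergeh interleaves the high (leftmost)
-// halves of its operands, vec_mergel the low halves:
-//
-//   vector signed int a = (vector signed int){0, 1, 2, 3};
-//   vector signed int b = (vector signed int){4, 5, 6, 7};
-//   vec_mergeh(a, b);   // {0, 4, 1, 5}
-//   vec_mergel(a, b);   // {2, 6, 3, 7}
-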
-/*-- vec_pack ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_pack(vector signed short __a, vector signed short __b) {
-  vector signed char __ac = (vector signed char)__a;
-  vector signed char __bc = (vector signed char)__b;
-  return (vector signed char)(
-    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
-    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_pack(vector bool short __a, vector bool short __b) {
-  vector bool char __ac = (vector bool char)__a;
-  vector bool char __bc = (vector bool char)__b;
-  return (vector bool char)(
-    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
-    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned char __ac = (vector unsigned char)__a;
-  vector unsigned char __bc = (vector unsigned char)__b;
-  return (vector unsigned char)(
-    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
-    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_pack(vector signed int __a, vector signed int __b) {
-  vector signed short __ac = (vector signed short)__a;
-  vector signed short __bc = (vector signed short)__b;
-  return (vector signed short)(
-    __ac[1], __ac[3], __ac[5], __ac[7],
-    __bc[1], __bc[3], __bc[5], __bc[7]);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_pack(vector bool int __a, vector bool int __b) {
-  vector bool short __ac = (vector bool short)__a;
-  vector bool short __bc = (vector bool short)__b;
-  return (vector bool short)(
-    __ac[1], __ac[3], __ac[5], __ac[7],
-    __bc[1], __bc[3], __bc[5], __bc[7]);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned short __ac = (vector unsigned short)__a;
-  vector unsigned short __bc = (vector unsigned short)__b;
-  return (vector unsigned short)(
-    __ac[1], __ac[3], __ac[5], __ac[7],
-    __bc[1], __bc[3], __bc[5], __bc[7]);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_pack(vector signed long long __a, vector signed long long __b) {
-  vector signed int __ac = (vector signed int)__a;
-  vector signed int __bc = (vector signed int)__b;
-  return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_pack(vector bool long long __a, vector bool long long __b) {
-  vector bool int __ac = (vector bool int)__a;
-  vector bool int __bc = (vector bool int)__b;
-  return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  vector unsigned int __bc = (vector unsigned int)__b;
-  return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
-}
-
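-// Usage sketch (illustrative): vec_pack narrows two vectors into one by
-// keeping only the low half of each element (modular truncation -- for
-// saturation see vec_packs below):
-//
-//   vector signed int a = (vector signed int){0x10001, 2, 3, 4};
-//   vec_pack(a, a);   // shorts {1, 2, 3, 4, 1, 2, 3, 4}
-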
-/*-- vec_packs --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_packs(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vpksh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vpklsh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_packs(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vpksf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vpklsf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_packs(vector signed long long __a, vector signed long long __b) {
-  return __builtin_s390_vpksg(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vpklsg(__a, __b);
-}
-
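-// Usage sketch (illustrative): vec_packs clamps each element to the limits
-// of the narrower type instead of truncating:
-//
-//   vector signed short a = vec_splats((signed short)300);
-//   vec_packs(a, a);   // every byte saturates to 127
-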
-/*-- vec_packs_cc -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
-  return __builtin_s390_vpkshs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vpklshs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return __builtin_s390_vpksfs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vpklsfs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_packs_cc(vector signed long long __a, vector signed long long __b,
-             int *__cc) {
-  return __builtin_s390_vpksgs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
-             int *__cc) {
-  return __builtin_s390_vpklsgs(__a, __b, __cc);
-}
-
-/*-- vec_packsu -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector signed short __a, vector signed short __b) {
-  const vector signed short __zero = (vector signed short)0;
-  return __builtin_s390_vpklsh(
-    (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
-    (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vpklsh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector signed int __a, vector signed int __b) {
-  const vector signed int __zero = (vector signed int)0;
-  return __builtin_s390_vpklsf(
-    (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
-    (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vpklsf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector signed long long __a, vector signed long long __b) {
-  const vector signed long long __zero = (vector signed long long)0;
-  return __builtin_s390_vpklsg(
-    (vector unsigned long long)(__a >= __zero) &
-    (vector unsigned long long)__a,
-    (vector unsigned long long)(__b >= __zero) &
-    (vector unsigned long long)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vpklsg(__a, __b);
-}
-
-/*-- vec_packsu_cc ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vpklshs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_packsu_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vpklsfs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
-              int *__cc) {
-  return __builtin_s390_vpklsgs(__a, __b, __cc);
-}
-
-/*-- vec_unpackh ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_unpackh(vector signed char __a) {
-  return __builtin_s390_vuphb(__a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_unpackh(vector bool char __a) {
-  return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackh(vector unsigned char __a) {
-  return __builtin_s390_vuplhb(__a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_unpackh(vector signed short __a) {
-  return __builtin_s390_vuphh(__a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_unpackh(vector bool short __a) {
-  return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackh(vector unsigned short __a) {
-  return __builtin_s390_vuplhh(__a);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackh(vector signed int __a) {
-  return __builtin_s390_vuphf(__a);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackh(vector bool int __a) {
-  return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackh(vector unsigned int __a) {
-  return __builtin_s390_vuplhf(__a);
-}
-
-/*-- vec_unpackl ------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_unpackl(vector signed char __a) {
-  return __builtin_s390_vuplb(__a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_unpackl(vector bool char __a) {
-  return (vector bool short)__builtin_s390_vuplb((vector signed char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_unpackl(vector unsigned char __a) {
-  return __builtin_s390_vupllb(__a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_unpackl(vector signed short __a) {
-  return __builtin_s390_vuplhw(__a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_unpackl(vector bool short __a) {
-  return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_unpackl(vector unsigned short __a) {
-  return __builtin_s390_vupllh(__a);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_unpackl(vector signed int __a) {
-  return __builtin_s390_vuplf(__a);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_unpackl(vector bool int __a) {
-  return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unpackl(vector unsigned int __a) {
-  return __builtin_s390_vupllf(__a);
-}
-
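-// Usage sketch (illustrative): vec_unpackh widens the high (leftmost) half
-// of a vector, vec_unpackl the low half; signed elements are sign-extended
-// and unsigned elements zero-extended:
-//
-//   vector signed short v =
-//     (vector signed short){-1, 2, -3, 4, -5, 6, -7, 8};
-//   vec_unpackh(v);   // ints {-1, 2, -3, 4}
-//   vec_unpackl(v);   // ints {-5, 6, -7, 8}
-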
-/*-- vec_cmpeq --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector bool int __a, vector bool int __b) {
-  return (vector bool int)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpeq(vector float __a, vector float __b) {
-  return (vector bool int)(__a == __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpeq(vector double __a, vector double __b) {
-  return (vector bool long long)(__a == __b);
-}
-
-/*-- vec_cmpge --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a >= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a >= __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpge(vector float __a, vector float __b) {
-  return (vector bool int)(__a >= __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpge(vector double __a, vector double __b) {
-  return (vector bool long long)(__a >= __b);
-}
-
-/*-- vec_cmpgt --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a > __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a > __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmpgt(vector float __a, vector float __b) {
-  return (vector bool int)(__a > __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmpgt(vector double __a, vector double __b) {
-  return (vector bool long long)(__a > __b);
-}
-
-/*-- vec_cmple --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a <= __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a <= __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmple(vector float __a, vector float __b) {
-  return (vector bool int)(__a <= __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmple(vector double __a, vector double __b) {
-  return (vector bool long long)(__a <= __b);
-}
-
-/*-- vec_cmplt --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector signed short __a, vector signed short __b) {
-  return (vector bool short)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector signed int __a, vector signed int __b) {
-  return (vector bool int)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)(__a < __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)(__a < __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool int
-vec_cmplt(vector float __a, vector float __b) {
-  return (vector bool int)(__a < __b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector bool long long
-vec_cmplt(vector double __a, vector double __b) {
-  return (vector bool long long)(__a < __b);
-}
-
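-// Usage sketch (illustrative): the vec_cmp* functions return per-element
-// masks (all ones where the relation holds), which compose naturally with
-// vec_sel:
-//
-//   vector bool int m = vec_cmpgt(a, b);
-//   vector signed int max = vec_sel(b, a, m);   // element-wise maximum
-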
-/*-- vec_all_eq -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_eq(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_eq(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
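-// Usage sketch (illustrative): the vec_all_* and vec_any_* predicates test
-// the condition code set by the comparison -- 0 means the relation held for
-// every element, 3 for none, 1 for a mix. Hence vec_all_eq checks
-// __cc == 0, while vec_all_ne below checks __cc == 3:
-//
-//   if (vec_all_eq(va, vb)) {
-//     /* every element matched */
-//   }
-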
-/*-- vec_all_ne -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_ne(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_ne(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_ge -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_ge(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc == 3;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_ge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_ge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_gt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_gt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc == 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_gt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_gt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_le -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc == 3;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_le(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc == 3;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_le(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_le(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_lt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc == 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc == 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_all_lt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc == 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_lt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_lt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_nge ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_ngt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_ngt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_ngt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_nle ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nle(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nle(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_nlt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nlt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nlt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc == 3;
-}
-
-/*-- vec_all_nan ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_nan(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc == 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_nan(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc == 0;
-}
-
-/*-- vec_all_numeric --------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_all_numeric(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc == 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_all_numeric(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc == 3;
-}
-
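vec_all_nan and vec_all_numeric above are two readings of the same test-data-class instruction: mask 15 selects the NaN classes, so cc == 0 (all elements selected) means every lane is a NaN, while cc == 3 (none selected) means every lane is numeric. A small sketch under the same `-mzvector` assumption (the helper name is hypothetical):

#include <vecintrin.h>

/* Return v unchanged when no lane is a NaN, else an all-zero vector.
 * vec_all_numeric tests cc == 3 from vftcidb with mask 15. */
static vector double zero_if_any_nan(vector double v) {
  if (vec_all_numeric(v))
    return v;
  return (vector double){0.0, 0.0};
}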
-/*-- vec_any_eq -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_eq(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_eq(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_ne -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vceqbs((vector signed char)__a,
-                        (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vceqhs((vector signed short)__a,
-                        (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vceqfs((vector signed int)__a,
-                        (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vceqgs((vector signed long long)__a,
-                        (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_ne(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfcesbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_ne(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfcedbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
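For the any-variants the same condition code is read from the other side: `__cc <= 1` means at least one element satisfied the compare (0 = all, 1 = some), and vec_any_ne tests `__cc != 0`, i.e. "not all lanes compared equal". A hedged sketch, again assuming `-mzvector` and an illustrative function name:

#include <vecintrin.h>

/* True if at least one lane of a differs from b;
 * vec_any_ne is the complement of vec_all_eq. */
static int lanes_diverge(vector double a, vector double b) {
  return vec_any_ne(a, b);
}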
-/*-- vec_any_ge -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_ge(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc != 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_ge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_ge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_gt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_gt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc <= 1;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_gt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_gt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_le -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__a,
-                        (vector unsigned char)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__a,
-                        (vector unsigned short)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__a,
-                        (vector unsigned int)__b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc);
-  return __cc != 0;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_le(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__a,
-                        (vector unsigned long long)__b, &__cc);
-  return __cc != 0;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_le(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_le(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_lt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchbs((vector signed char)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector signed char __b) {
-  int __cc;
-  __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector unsigned char __b) {
-  int __cc;
-  __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool char __a, vector bool char __b) {
-  int __cc;
-  __builtin_s390_vchlbs((vector unsigned char)__b,
-                        (vector unsigned char)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchhs((vector signed short)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector signed short __b) {
-  int __cc;
-  __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector unsigned short __b) {
-  int __cc;
-  __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool short __a, vector bool short __b) {
-  int __cc;
-  __builtin_s390_vchlhs((vector unsigned short)__b,
-                        (vector unsigned short)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchfs((vector signed int)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector signed int __b) {
-  int __cc;
-  __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector unsigned int __b) {
-  int __cc;
-  __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool int __a, vector bool int __b) {
-  int __cc;
-  __builtin_s390_vchlfs((vector unsigned int)__b,
-                        (vector unsigned int)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector signed long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector signed long long __b) {
-  int __cc;
-  __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc);
-  return __cc <= 1;
-}
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector unsigned long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector unsigned long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc);
-  return __cc <= 1;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai int
-vec_any_lt(vector bool long long __a, vector bool long long __b) {
-  int __cc;
-  __builtin_s390_vchlgs((vector unsigned long long)__b,
-                        (vector unsigned long long)__a, &__cc);
-  return __cc <= 1;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_lt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_lt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc <= 1;
-}
-
-/*-- vec_any_nge ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nge(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nge(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_ngt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_ngt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_ngt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__a, __b, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_nle ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nle(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchesbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nle(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchedbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_nlt ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nlt(vector float __a, vector float __b) {
-  int __cc;
-  __builtin_s390_vfchsbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nlt(vector double __a, vector double __b) {
-  int __cc;
-  __builtin_s390_vfchdbs(__b, __a, &__cc);
-  return __cc != 0;
-}
-
-/*-- vec_any_nan ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_nan(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc != 3;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_nan(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc != 3;
-}
-
-/*-- vec_any_numeric --------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_any_numeric(vector float __a) {
-  int __cc;
-  __builtin_s390_vftcisb(__a, 15, &__cc);
-  return __cc != 0;
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_any_numeric(vector double __a) {
-  int __cc;
-  __builtin_s390_vftcidb(__a, 15, &__cc);
-  return __cc != 0;
-}
-
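Illustrative aside (not part of the deleted hunk): vec_any_nan and vec_any_numeric both issue VFTCI (test data class) with mask 15, which, as far as the class-mask encoding goes, selects the four NaN classes; cc 3 means no element matched, so `__cc != 3` reads as "at least one NaN" and `__cc != 0` as "at least one non-NaN". A sketch with invented helper names:

#include <vecintrin.h>

/* Nonzero when any lane of v is a quiet or signaling NaN. */
static int has_nan(vector double v) {
  return vec_any_nan(v);
}

/* Nonzero when at least one lane holds an ordinary numeric value. */
static int has_numeric(vector double v) {
  return vec_any_numeric(v);
}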
-/*-- vec_andc ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_andc(vector bool char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector bool char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_andc(vector signed char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector bool char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_andc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_andc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector signed short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector bool short __a, vector signed short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_andc(vector signed short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector bool short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_andc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_andc(vector bool int __a, vector bool int __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector signed int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector bool int __a, vector signed int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_andc(vector signed int __a, vector bool int __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector bool int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_andc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_andc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector bool long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_andc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_andc(vector float __a, vector float __b) {
-  return (vector float)((vector unsigned int)__a &
-                         ~(vector unsigned int)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector double __b) {
-  return (vector double)((vector unsigned long long)__a &
-                         ~(vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector bool long long __a, vector double __b) {
-  return (vector double)((vector unsigned long long)__a &
-                         ~(vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_andc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a &
-                         ~(vector unsigned long long)__b);
-}
-
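Illustrative aside (not part of the deleted hunk): since vec_andc computes __a & ~__b, the double overload gives a branch-free fabs by clearing each lane's sign bit. A sketch; fabs_via_andc is an invented name:

#include <vecintrin.h>

static vector double fabs_via_andc(vector double x) {
  const vector unsigned long long sign =
      {0x8000000000000000ULL, 0x8000000000000000ULL};
  /* x & ~sign clears bit 63 of every lane. */
  return vec_andc(x, (vector double)sign);
}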
-/*-- vec_nor ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_nor(vector bool char __a, vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector bool char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_nor(vector signed char __a, vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector bool char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_nor(vector unsigned char __a, vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_nor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector signed short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector bool short __a, vector signed short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_nor(vector signed short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector bool short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_nor(vector unsigned short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_nor(vector bool int __a, vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector signed int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector bool int __a, vector signed int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_nor(vector signed int __a, vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector bool int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_nor(vector unsigned int __a, vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_nor(vector bool long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector bool long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_nor(vector signed long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nor(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nor(vector float __a, vector float __b) {
-  return (vector float)~((vector unsigned int)__a |
-                         (vector unsigned int)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a |
-                          (vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector bool long long __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a |
-                          (vector unsigned long long)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_nor(vector double __a, vector bool long long __b) {
-  return (vector double)~((vector unsigned long long)__a |
-                          (vector unsigned long long)__b);
-}
-
-/*-- vec_orc ----------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_orc(vector bool char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_orc(vector signed char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_orc(vector bool short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_orc(vector signed short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_orc(vector bool int __a, vector bool int __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_orc(vector signed int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_orc(vector bool long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_orc(vector signed long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static inline __ATTRS_o_ai vector float
-vec_orc(vector float __a, vector float __b) {
-  return (vector float)((vector unsigned int)__a |
-                        ~(vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_orc(vector double __a, vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         ~(vector unsigned long long)__b);
-}
-#endif
-
-/*-- vec_nand ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_nand(vector bool char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_nand(vector signed char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_nand(vector bool short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_nand(vector signed short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_nand(vector bool int __a, vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_nand(vector signed int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_nand(vector bool long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_nand(vector signed long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_nand(vector float __a, vector float __b) {
-  return (vector float)~((vector unsigned int)__a &
-                         (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_nand(vector double __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a &
-                          (vector unsigned long long)__b);
-}
-#endif
-
-/*-- vec_eqv ----------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector bool char
-vec_eqv(vector bool char __a, vector bool char __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_eqv(vector signed char __a, vector signed char __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_eqv(vector bool short __a, vector bool short __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_eqv(vector signed short __a, vector signed short __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_eqv(vector bool int __a, vector bool int __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_eqv(vector signed int __a, vector signed int __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector bool long long
-vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a ^ __b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_eqv(vector float __a, vector float __b) {
-  return (vector float)~((vector unsigned int)__a ^
-                         (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_eqv(vector double __a, vector double __b) {
-  return (vector double)~((vector unsigned long long)__a ^
-                          (vector unsigned long long)__b);
-}
-#endif
-
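Illustrative aside (not part of the deleted hunk): vec_orc, vec_nand and vec_eqv exist only under __ARCH__ >= 12 (arch12, i.e. z14 — the same level that introduced the vector float overloads); on older targets the equivalent results come from the generic vector operators. A sketch with an invented helper:

#include <vecintrin.h>

/* All-ones in each lane where a and b are bit-identical. */
static vector unsigned int bitwise_equiv(vector unsigned int a,
                                         vector unsigned int b) {
#if __ARCH__ >= 12
  return vec_eqv(a, b);   /* single instruction on z14 and later */
#else
  return ~(a ^ b);        /* same result from the generic operators */
#endif
}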
-/*-- vec_cntlz --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector signed char __a) {
-  return __builtin_s390_vclzb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cntlz(vector unsigned char __a) {
-  return __builtin_s390_vclzb(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector signed short __a) {
-  return __builtin_s390_vclzh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cntlz(vector unsigned short __a) {
-  return __builtin_s390_vclzh(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector signed int __a) {
-  return __builtin_s390_vclzf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cntlz(vector unsigned int __a) {
-  return __builtin_s390_vclzf(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector signed long long __a) {
-  return __builtin_s390_vclzg((vector unsigned long long)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cntlz(vector unsigned long long __a) {
-  return __builtin_s390_vclzg(__a);
-}
-
-/*-- vec_cnttz --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector signed char __a) {
-  return __builtin_s390_vctzb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cnttz(vector unsigned char __a) {
-  return __builtin_s390_vctzb(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector signed short __a) {
-  return __builtin_s390_vctzh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cnttz(vector unsigned short __a) {
-  return __builtin_s390_vctzh(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector signed int __a) {
-  return __builtin_s390_vctzf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cnttz(vector unsigned int __a) {
-  return __builtin_s390_vctzf(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector signed long long __a) {
-  return __builtin_s390_vctzg((vector unsigned long long)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_cnttz(vector unsigned long long __a) {
-  return __builtin_s390_vctzg(__a);
-}
-
-/*-- vec_popcnt -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector signed char __a) {
-  return __builtin_s390_vpopctb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_popcnt(vector unsigned char __a) {
-  return __builtin_s390_vpopctb(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector signed short __a) {
-  return __builtin_s390_vpopcth((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_popcnt(vector unsigned short __a) {
-  return __builtin_s390_vpopcth(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector signed int __a) {
-  return __builtin_s390_vpopctf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_popcnt(vector unsigned int __a) {
-  return __builtin_s390_vpopctf(__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector signed long long __a) {
-  return __builtin_s390_vpopctg((vector unsigned long long)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_popcnt(vector unsigned long long __a) {
-  return __builtin_s390_vpopctg(__a);
-}
-
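Illustrative aside (not part of the deleted hunk): vec_cntlz, vec_cnttz and vec_popcnt all operate per element and always return the unsigned vector of matching width, even for signed inputs. For example, a per-lane floor(log2) falls out of vec_cntlz; the helper name is invented:

#include <vecintrin.h>

/* Per-lane floor(log2(v)); a zero lane wraps around to 0xffffffff. */
static vector unsigned int lane_floor_log2(vector unsigned int v) {
  return (vector unsigned int){31, 31, 31, 31} - vec_cntlz(v);
}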
-/*-- vec_rl -----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_rl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_verllvb(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_verllvb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_rl(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_verllvh(
-    (vector unsigned short)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_verllvh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_rl(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_verllvf(
-    (vector unsigned int)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_verllvf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_verllvg(
-    (vector unsigned long long)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_verllvg(__a, __b);
-}
-
-/*-- vec_rli ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_rli(vector signed char __a, unsigned long __b) {
-  return (vector signed char)__builtin_s390_verllb(
-    (vector unsigned char)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_rli(vector unsigned char __a, unsigned long __b) {
-  return __builtin_s390_verllb(__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_rli(vector signed short __a, unsigned long __b) {
-  return (vector signed short)__builtin_s390_verllh(
-    (vector unsigned short)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_rli(vector unsigned short __a, unsigned long __b) {
-  return __builtin_s390_verllh(__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_rli(vector signed int __a, unsigned long __b) {
-  return (vector signed int)__builtin_s390_verllf(
-    (vector unsigned int)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_rli(vector unsigned int __a, unsigned long __b) {
-  return __builtin_s390_verllf(__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_rli(vector signed long long __a, unsigned long __b) {
-  return (vector signed long long)__builtin_s390_verllg(
-    (vector unsigned long long)__a, (int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_rli(vector unsigned long long __a, unsigned long __b) {
-  return __builtin_s390_verllg(__a, (int)__b);
-}
-
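Illustrative aside (not part of the deleted hunk): vec_rl takes a per-element rotate count while vec_rli applies one scalar count to every element; neither requires a compile-time constant, unlike vec_rl_mask below. A sketch; rotl32_lanes is an invented name:

#include <vecintrin.h>

/* Rotate every 32-bit lane left by r bits (r is taken modulo 32). */
static vector unsigned int rotl32_lanes(vector unsigned int v,
                                        unsigned long r) {
  return vec_rli(v, r);
}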
-/*-- vec_rl_mask ------------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_rl_mask(vector signed char __a, vector unsigned char __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned char
-vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector signed short
-vec_rl_mask(vector signed short __a, vector unsigned short __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned short
-vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector signed int
-vec_rl_mask(vector signed int __a, vector unsigned int __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned int
-vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector signed long long
-vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
-            unsigned char __c) __constant(__c);
-
-extern __ATTRS_o vector unsigned long long
-vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
-            unsigned char __c) __constant(__c);
-
-#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
-  __extension__ ({ \
-    vector unsigned char __res; \
-    vector unsigned char __x = (vector unsigned char)(X); \
-    vector unsigned char __y = (vector unsigned char)(Y); \
-    switch (sizeof ((X)[0])) { \
-    case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
-             (vector unsigned char)__x, (vector unsigned char)__x, \
-             (vector unsigned char)__y, (Z)); break; \
-    case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
-             (vector unsigned short)__x, (vector unsigned short)__x, \
-             (vector unsigned short)__y, (Z)); break; \
-    case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
-             (vector unsigned int)__x, (vector unsigned int)__x, \
-             (vector unsigned int)__y, (Z)); break; \
-    default: __res = (vector unsigned char) __builtin_s390_verimg( \
-             (vector unsigned long long)__x, (vector unsigned long long)__x, \
-             (vector unsigned long long)__y, (Z)); break; \
-    } __res; }))
-
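Illustrative aside (not part of the deleted hunk): the block above uses a declare-then-define idiom — extern overload declarations followed by a macro of the same name. The macro routes the call to the right __builtin_s390_verim* so that (Z) lands in the instruction's immediate field (hence the __constant(__c) requirement on the declarations), while the parenthesized call (vec_rl_mask)((X), (Y), (Z)), which suppresses macro expansion, is only ever evaluated inside __typeof__ to recover the correct overloaded return type. Usage sketch with an invented helper; the key point is that the third argument must be a literal:

#include <vecintrin.h>

static vector unsigned int demo_rl_mask(vector unsigned int a,
                                        vector unsigned int b) {
  /* 7 must be a compile-time constant; a runtime variable here would
     be rejected by the __constant check on the declared prototypes. */
  return vec_rl_mask(a, b, 7);
}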
-/*-- vec_sll ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sll(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sll(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsl(__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
-  return __builtin_s390_vsl(__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_s390_vsl(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned char __b) {
-  return (vector signed short)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sll(vector signed short __a, vector unsigned int __b) {
-  return (vector signed short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sll(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned char __b) {
-  return (vector signed int)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned short __b) {
-  return (vector signed int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sll(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sll(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned short __b) {
-  return (vector signed long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sll(vector signed long long __a, vector unsigned int __b) {
-  return (vector signed long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned char __b) {
-  return (vector bool long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned short __b) {
-  return (vector bool long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sll(vector bool long long __a, vector unsigned int __b) {
-  return (vector bool long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned short __b) {
-  return (vector unsigned long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sll(vector unsigned long long __a, vector unsigned int __b) {
-  return (vector unsigned long long)__builtin_s390_vsl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
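For context: vec_sll shifts the entire 128-bit register left by a bit count taken
from the second operand, and the non-deprecated overloads take that count as a
vector unsigned char with the value replicated into every byte. A minimal usage
sketch, assuming an s390x target built with -mzvector and a vector-facility
-march (z13 or later); the function name is illustrative:

#include <vecintrin.h>

/* Shift the whole 128-bit contents of x left by 3 bits.
   vec_splats replicates the bit count into every byte lane. */
static vector unsigned char shift_left_3_bits(vector unsigned char x) {
  return vec_sll(x, vec_splats((unsigned char)3));
}
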
-/*-- vec_slb ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_slb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vslb(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector signed char __b) {
-  return __builtin_s390_vslb(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_slb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vslb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_slb(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector signed short __b) {
-  return (vector unsigned short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_slb(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_slb(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector signed int __b) {
-  return (vector unsigned int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_slb(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_slb(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector signed long long __b) {
-  return (vector unsigned long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector signed int __b) {
-  return (vector float)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_slb(vector float __a, vector unsigned int __b) {
-  return (vector float)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector signed long long __b) {
-  return (vector double)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_slb(vector double __a, vector unsigned long long __b) {
-  return (vector double)__builtin_s390_vslb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
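vec_slb shifts by whole bytes (when every byte of the count vector holds n, the
byte count used is n / 8), so pairing it with vec_sll yields a full variable
shift by 0-127 bits. A sketch under the same build assumptions; sll128 is an
illustrative name:

/* Shift the 128-bit value in x left by n bits (0 <= n <= 127):
   vec_slb consumes the byte part of the count, vec_sll the bit part. */
static vector unsigned char sll128(vector unsigned char x, unsigned int n) {
  vector unsigned char amt = vec_splats((unsigned char)n);
  return vec_sll(vec_slb(x, amt), amt);
}
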
-/*-- vec_sld ----------------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_sld(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool char
-vec_sld(vector bool char __a, vector bool char __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned char
-vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector signed short
-vec_sld(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool short
-vec_sld(vector bool short __a, vector bool short __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned short
-vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector signed int
-vec_sld(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool int
-vec_sld(vector bool int __a, vector bool int __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned int
-vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector signed long long
-vec_sld(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector bool long long
-vec_sld(vector bool long long __a, vector bool long long __b, int __c)
-  __constant_range(__c, 0, 15);
-
-extern __ATTRS_o vector unsigned long long
-vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 15);
-
-#if __ARCH__ >= 12
-extern __ATTRS_o vector float
-vec_sld(vector float __a, vector float __b, int __c)
-  __constant_range(__c, 0, 15);
-#endif
-
-extern __ATTRS_o vector double
-vec_sld(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 15);
-
-#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
-  __builtin_s390_vsldb((vector unsigned char)(X), \
-                       (vector unsigned char)(Y), (Z)))
-
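vec_sld (via the vsldb builtin behind the macro above) selects a 16-byte window
from the 32-byte concatenation of its operands: the result is bytes __c through
__c + 15 of __a:__b. A small sketch; take_window is an illustrative name:

/* Bytes 4..19 of the concatenation a:b. */
static vector unsigned char take_window(vector unsigned char a,
                                        vector unsigned char b) {
  return vec_sld(a, b, 4);
}
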
-/*-- vec_sldw ---------------------------------------------------------------*/
-
-extern __ATTRS_o vector signed char
-vec_sldw(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned char
-vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector signed short
-vec_sldw(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned short
-vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector signed int
-vec_sldw(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned int
-vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector signed long long
-vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-extern __ATTRS_o vector unsigned long long
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 3);
-
-// This prototype is deprecated.
-extern __ATTRS_o vector double
-vec_sldw(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 3);
-
-#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
-  __builtin_s390_vsldb((vector unsigned char)(X), \
-                       (vector unsigned char)(Y), (Z) * 4))
-
-/*-- vec_sldb ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-extern __ATTRS_o vector signed char
-vec_sldb(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned char
-vec_sldb(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed short
-vec_sldb(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned short
-vec_sldb(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed int
-vec_sldb(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned int
-vec_sldb(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed long long
-vec_sldb(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned long long
-vec_sldb(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector float
-vec_sldb(vector float __a, vector float __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector double
-vec_sldb(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 7);
-
-#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
-  __builtin_s390_vsld((vector unsigned char)(X), \
-                      (vector unsigned char)(Y), (Z)))
-
-#endif
-
-/*-- vec_sral ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_sral(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_sral(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsra(__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned short __b) {
-  return __builtin_s390_vsra(__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_sral(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_s390_vsra(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned char __b) {
-  return (vector signed short)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_sral(vector signed short __a, vector unsigned int __b) {
-  return (vector signed short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_sral(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_sral(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned char __b) {
-  return (vector signed int)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned short __b) {
-  return (vector signed int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_sral(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_sral(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_sral(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned short __b) {
-  return (vector signed long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_sral(vector signed long long __a, vector unsigned int __b) {
-  return (vector signed long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned char __b) {
-  return (vector bool long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned short __b) {
-  return (vector bool long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_sral(vector bool long long __a, vector unsigned int __b) {
-  return (vector bool long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned short __b) {
-  return (vector unsigned long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sral(vector unsigned long long __a, vector unsigned int __b) {
-  return (vector unsigned long long)__builtin_s390_vsra(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
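Note that vec_sral is not a per-lane shift: it shifts the whole 128-bit register
right arithmetically, replicating the leftmost bit into the vacated positions.
A sketch:

/* Arithmetic right shift of the entire 128-bit contents by 4 bits. */
static vector signed char asr128_by4(vector signed char x) {
  return vec_sral(x, vec_splats((unsigned char)4));
}
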
-/*-- vec_srab ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_srab(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsrab(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector signed char __b) {
-  return __builtin_s390_vsrab(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srab(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsrab(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srab(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector signed short __b) {
-  return (vector unsigned short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srab(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srab(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector signed int __b) {
-  return (vector unsigned int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srab(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srab(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector signed long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srab(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector signed int __b) {
-  return (vector float)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_srab(vector float __a, vector unsigned int __b) {
-  return (vector float)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector signed long long __b) {
-  return (vector double)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_srab(vector double __a, vector unsigned long long __b) {
-  return (vector double)__builtin_s390_vsrab(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-/*-- vec_srl ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_srl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool char
-vec_srl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsrl(__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
-  return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_s390_vsrl(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned char __b) {
-  return (vector signed short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_srl(vector signed short __a, vector unsigned int __b) {
-  return (vector signed short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool short
-vec_srl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned char __b) {
-  return (vector signed int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned short __b) {
-  return (vector signed int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_srl(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool int
-vec_srl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned short __b) {
-  return (vector signed long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_srl(vector signed long long __a, vector unsigned int __b) {
-  return (vector signed long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned char __b) {
-  return (vector bool long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned short __b) {
-  return (vector bool long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector bool long long
-vec_srl(vector bool long long __a, vector unsigned int __b) {
-  return (vector bool long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, __b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned short __b) {
-  return (vector unsigned long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srl(vector unsigned long long __a, vector unsigned int __b) {
-  return (vector unsigned long long)__builtin_s390_vsrl(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-/*-- vec_srb ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_srb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector signed char __b) {
-  return __builtin_s390_vsrlb(__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_srb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsrlb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_srb(vector signed short __a, vector unsigned short __b) {
-  return (vector signed short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector signed short __b) {
-  return (vector unsigned short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_srb(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_srb(vector signed int __a, vector unsigned int __b) {
-  return (vector signed int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector signed int __b) {
-  return (vector unsigned int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_srb(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_srb(vector signed long long __a, vector unsigned long long __b) {
-  return (vector signed long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector signed long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_srb(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector signed int __b) {
-  return (vector float)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_srb(vector float __a, vector unsigned int __b) {
-  return (vector float)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector signed long long __b) {
-  return (vector double)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_srb(vector double __a, vector unsigned long long __b) {
-  return (vector double)__builtin_s390_vsrlb(
-    (vector unsigned char)__a, (vector unsigned char)__b);
-}
-
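As with the left-shift pair, vec_srb (byte part) composes with vec_srl (bit
part) into a full variable logical right shift. A sketch; srl128 is an
illustrative name:

/* Shift the 128-bit value in x right by n bits (0 <= n <= 127). */
static vector unsigned char srl128(vector unsigned char x, unsigned int n) {
  vector unsigned char amt = vec_splats((unsigned char)n);
  return vec_srl(vec_srb(x, amt), amt);
}
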
-/*-- vec_srdb ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-extern __ATTRS_o vector signed char
-vec_srdb(vector signed char __a, vector signed char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned char
-vec_srdb(vector unsigned char __a, vector unsigned char __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed short
-vec_srdb(vector signed short __a, vector signed short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned short
-vec_srdb(vector unsigned short __a, vector unsigned short __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed int
-vec_srdb(vector signed int __a, vector signed int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned int
-vec_srdb(vector unsigned int __a, vector unsigned int __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector signed long long
-vec_srdb(vector signed long long __a, vector signed long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector unsigned long long
-vec_srdb(vector unsigned long long __a, vector unsigned long long __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector float
-vec_srdb(vector float __a, vector float __b, int __c)
-  __constant_range(__c, 0, 7);
-
-extern __ATTRS_o vector double
-vec_srdb(vector double __a, vector double __b, int __c)
-  __constant_range(__c, 0, 7);
-
-#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
-  __builtin_s390_vsrd((vector unsigned char)(X), \
-                      (vector unsigned char)(Y), (Z)))
-
-#endif
-
-/*-- vec_abs ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_abs(vector signed char __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0));
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_abs(vector signed short __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_abs(vector signed int __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_abs(vector signed long long __a) {
-  return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_abs(vector float __a) {
-  return __builtin_s390_vflpsb(__a);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_abs(vector double __a) {
-  return __builtin_s390_vflpdb(__a);
-}
-
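The integer forms above are synthesized from vec_sel and vec_cmplt, so they
wrap on the most negative value (two's-complement negation maps INT_MIN to
itself), while the float and double forms lower to the vflpsb/vflpdb
load-positive builtins seen above. A tiny sketch:

/* |x| per lane; e.g. lanes {-5, 7} become {5, 7}. */
static vector signed int abs_lanes(vector signed int x) {
  return vec_abs(x);
}
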
-/*-- vec_nabs ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nabs(vector float __a) {
-  return __builtin_s390_vflnsb(__a);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_nabs(vector double __a) {
-  return __builtin_s390_vflndb(__a);
-}
-
-/*-- vec_max ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector signed char __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector signed char __a, vector bool char __b) {
-  vector signed char __bc = (vector signed char)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_max(vector bool char __a, vector signed char __b) {
-  vector signed char __ac = (vector signed char)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector unsigned char __a, vector bool char __b) {
-  vector unsigned char __bc = (vector unsigned char)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_max(vector bool char __a, vector unsigned char __b) {
-  vector unsigned char __ac = (vector unsigned char)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector signed short __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector signed short __a, vector bool short __b) {
-  vector signed short __bc = (vector signed short)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_max(vector bool short __a, vector signed short __b) {
-  vector signed short __ac = (vector signed short)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector unsigned short __a, vector bool short __b) {
-  vector unsigned short __bc = (vector unsigned short)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_max(vector bool short __a, vector unsigned short __b) {
-  vector unsigned short __ac = (vector unsigned short)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector signed int __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector signed int __a, vector bool int __b) {
-  vector signed int __bc = (vector signed int)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_max(vector bool int __a, vector signed int __b) {
-  vector signed int __ac = (vector signed int)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector unsigned int __a, vector bool int __b) {
-  vector unsigned int __bc = (vector unsigned int)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_max(vector bool int __a, vector unsigned int __b) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector signed long long __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector signed long long __a, vector bool long long __b) {
-  vector signed long long __bc = (vector signed long long)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_max(vector bool long long __a, vector signed long long __b) {
-  vector signed long long __ac = (vector signed long long)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector unsigned long long __a, vector bool long long __b) {
-  vector unsigned long long __bc = (vector unsigned long long)__b;
-  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_max(vector bool long long __a, vector unsigned long long __b) {
-  vector unsigned long long __ac = (vector unsigned long long)__a;
-  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_max(vector float __a, vector float __b) {
-  return __builtin_s390_vfmaxsb(__a, __b, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_max(vector double __a, vector double __b) {
-#if __ARCH__ >= 12
-  return __builtin_s390_vfmaxdb(__a, __b, 0);
-#else
-  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
-#endif
-}
-
-/*-- vec_min ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector signed char __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector signed char __a, vector bool char __b) {
-  vector signed char __bc = (vector signed char)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed char
-vec_min(vector bool char __a, vector signed char __b) {
-  vector signed char __ac = (vector signed char)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector unsigned char __a, vector bool char __b) {
-  vector unsigned char __bc = (vector unsigned char)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned char
-vec_min(vector bool char __a, vector unsigned char __b) {
-  vector unsigned char __ac = (vector unsigned char)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector signed short __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector signed short __a, vector bool short __b) {
-  vector signed short __bc = (vector signed short)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed short
-vec_min(vector bool short __a, vector signed short __b) {
-  vector signed short __ac = (vector signed short)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector unsigned short __a, vector bool short __b) {
-  vector unsigned short __bc = (vector unsigned short)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned short
-vec_min(vector bool short __a, vector unsigned short __b) {
-  vector unsigned short __ac = (vector unsigned short)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector signed int __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector signed int __a, vector bool int __b) {
-  vector signed int __bc = (vector signed int)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed int
-vec_min(vector bool int __a, vector signed int __b) {
-  vector signed int __ac = (vector signed int)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector unsigned int __a, vector bool int __b) {
-  vector unsigned int __bc = (vector unsigned int)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned int
-vec_min(vector bool int __a, vector unsigned int __b) {
-  vector unsigned int __ac = (vector unsigned int)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector signed long long __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector signed long long __a, vector bool long long __b) {
-  vector signed long long __bc = (vector signed long long)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_min(vector bool long long __a, vector signed long long __b) {
-  vector signed long long __ac = (vector signed long long)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector unsigned long long __a, vector bool long long __b) {
-  vector unsigned long long __bc = (vector unsigned long long)__b;
-  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_min(vector bool long long __a, vector unsigned long long __b) {
-  vector unsigned long long __ac = (vector unsigned long long)__a;
-  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_min(vector float __a, vector float __b) {
-  return __builtin_s390_vfminsb(__a, __b, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_min(vector double __a, vector double __b) {
-#if __ARCH__ >= 12
-  return __builtin_s390_vfmindb(__a, __b, 0);
-#else
-  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
-#endif
-}
-
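vec_max and vec_min compose into the usual per-lane clamp. A sketch; clamp_s32
is an illustrative name:

/* Clamp every lane of x into [lo, hi]; assumes lo <= hi lane-wise. */
static vector signed int clamp_s32(vector signed int x,
                                   vector signed int lo,
                                   vector signed int hi) {
  return vec_min(vec_max(x, lo), hi);
}
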
-/*-- vec_add_u128 -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vaq(__a, __b);
-}
-
-/*-- vec_addc ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_addc(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vaccb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_addc(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vacch(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vaccf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_addc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vaccg(__a, __b);
-}
-
-/*-- vec_addc_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vaccq(__a, __b);
-}
-
-/*-- vec_adde_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return __builtin_s390_vacq(__a, __b, __c);
-}
-
-/*-- vec_addec_u128 ---------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return __builtin_s390_vacccq(__a, __b, __c);
-}
-
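The *_u128 forms treat a vector unsigned char as one 128-bit integer:
vec_addc_u128 returns the carry-out (0 or 1) and vec_adde_u128 consumes a
carry-in, so wider additions chain naturally. A sketch of a 256-bit add built
from 128-bit halves; all names are illustrative:

typedef vector unsigned char u128;

/* (r_hi:r_lo) = (a_hi:a_lo) + (b_hi:b_lo). */
static void add256(u128 a_hi, u128 a_lo, u128 b_hi, u128 b_lo,
                   u128 *r_hi, u128 *r_lo) {
  u128 carry = vec_addc_u128(a_lo, b_lo);   /* carry out of the low half */
  *r_lo = vec_add_u128(a_lo, b_lo);         /* low 128 bits of the sum   */
  *r_hi = vec_adde_u128(a_hi, b_hi, carry); /* high half plus carry-in   */
}
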
-/*-- vec_avg ----------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_avg(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vavgb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_avg(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vavgh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_avg(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vavgf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_avg(vector signed long long __a, vector signed long long __b) {
-  return __builtin_s390_vavgg(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vavglb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vavglh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vavglf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_avg(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vavglg(__a, __b);
-}
-
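vec_avg computes the rounded per-lane average, (a + b + 1) >> 1, with an
internally widened sum so the addition cannot overflow. A tiny sketch:

/* Rounded average; e.g. lanes holding 3 and 4 average to 4. */
static vector unsigned char avg_u8(vector unsigned char a,
                                   vector unsigned char b) {
  return vec_avg(a, b);
}
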
-/*-- vec_checksum -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned int
-vec_checksum(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vcksm(__a, __b);
-}
-
-/*-- vec_gfmsum -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vgfmb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vgfmh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vgfmf(__a, __b);
-}
-
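vec_gfmsum is a carry-less (GF(2)[x]) multiply-sum: corresponding elements are
multiplied without carries into double-width products, and each adjacent pair
of products is XORed into one result element; this is the primitive behind
vectorized CRC kernels. A minimal type-level sketch (clmul_bytes is an
illustrative name):

/* Carry-less multiply-sum of byte elements into halfword results. */
static vector unsigned short clmul_bytes(vector unsigned char a,
                                         vector unsigned char b) {
  return vec_gfmsum(a, b);
}
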
-/*-- vec_gfmsum_128 ---------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vgfmg(__a, __b);
-}
-
-/*-- vec_gfmsum_accum -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b,
-                 vector unsigned short __c) {
-  return __builtin_s390_vgfmab(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b,
-                 vector unsigned int __c) {
-  return __builtin_s390_vgfmah(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b,
-                 vector unsigned long long __c) {
-  return __builtin_s390_vgfmaf(__a, __b, __c);
-}
-
-/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_gfmsum_accum_128(vector unsigned long long __a,
-                     vector unsigned long long __b,
-                     vector unsigned char __c) {
-  return __builtin_s390_vgfmag(__a, __b, __c);
-}
-
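-/* Background note (illustrative sketch, not upstream code): the vec_gfmsum
- * family multiplies corresponding elements carry-lessly, as polynomials over
- * GF(2), and XORs each even/odd pair of double-width products into one
- * result element; the _accum forms additionally XOR in __c. This is the
- * building block for CRC- and GHASH-style polynomial arithmetic, e.g.:
- *
- *   vector unsigned long long data = ..., poly = ...;
- *   vector unsigned char acc = (vector unsigned char){0};
- *   acc = vec_gfmsum_accum_128(data, poly, acc);
- */
-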
-/*-- vec_mladd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector signed char __b,
-          vector signed char __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector unsigned char __a, vector signed char __b,
-          vector signed char __c) {
-  return (vector signed char)__a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed char
-vec_mladd(vector signed char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return __a * (vector signed char)__b + (vector signed char)__c;
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mladd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector unsigned short __a, vector signed short __b,
-          vector signed short __c) {
-  return (vector signed short)__a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mladd(vector signed short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * (vector signed short)__b + (vector signed short)__c;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return __a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector unsigned int __a, vector signed int __b,
-          vector signed int __c) {
-  return (vector signed int)__a * __b + __c;
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mladd(vector signed int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return __a * (vector signed int)__b + (vector signed int)__c;
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mladd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return __a * __b + __c;
-}
-
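-/* Usage sketch (illustrative only, not upstream code): vec_mladd keeps only
- * the low half of each product, so per lane it is exactly the C expression
- * a * b + c with the usual modulo-2^N wraparound:
- *
- *   vector unsigned short a = ..., b = ..., c = ...;
- *   vector unsigned short r = vec_mladd(a, b, c);   // (a*b + c) & 0xffff
- */
-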
-/*-- vec_mhadd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mhadd(vector signed char __a, vector signed char __b,
-          vector signed char __c) {
-  return __builtin_s390_vmahb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mhadd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return __builtin_s390_vmalhb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mhadd(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __builtin_s390_vmahh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mhadd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __builtin_s390_vmalhh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mhadd(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return __builtin_s390_vmahf(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mhadd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return __builtin_s390_vmalhf(__a, __b, __c);
-}
-
-/*-- vec_meadd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_meadd(vector signed char __a, vector signed char __b,
-          vector signed short __c) {
-  return __builtin_s390_vmaeb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_meadd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned short __c) {
-  return __builtin_s390_vmaleb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_meadd(vector signed short __a, vector signed short __b,
-          vector signed int __c) {
-  return __builtin_s390_vmaeh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_meadd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_s390_vmaleh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_meadd(vector signed int __a, vector signed int __b,
-          vector signed long long __c) {
-  return __builtin_s390_vmaef(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_meadd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned long long __c) {
-  return __builtin_s390_vmalef(__a, __b, __c);
-}
-
-/*-- vec_moadd --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_moadd(vector signed char __a, vector signed char __b,
-          vector signed short __c) {
-  return __builtin_s390_vmaob(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_moadd(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned short __c) {
-  return __builtin_s390_vmalob(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_moadd(vector signed short __a, vector signed short __b,
-          vector signed int __c) {
-  return __builtin_s390_vmaoh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_moadd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_s390_vmaloh(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_moadd(vector signed int __a, vector signed int __b,
-          vector signed long long __c) {
-  return __builtin_s390_vmaof(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_moadd(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned long long __c) {
-  return __builtin_s390_vmalof(__a, __b, __c);
-}
-
-/*-- vec_mulh ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_mulh(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vmhb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_mulh(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vmlhb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_mulh(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vmhh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vmlhh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mulh(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vmhf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulh(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vmlhf(__a, __b);
-}
-
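-/* Usage sketch (illustrative only, not upstream code): vec_mulh returns the
- * high half of the double-width product, e.g. for unsigned int lanes:
- *
- *   vector unsigned int hi = vec_mulh(a, b);
- *   // hi[i] == (unsigned int)(((unsigned long long)a[i] * b[i]) >> 32)
- */
-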
-/*-- vec_mule ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_mule(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vmeb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vmleb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mule(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vmeh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vmleh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mule(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vmef(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vmlef(__a, __b);
-}
-
-/*-- vec_mulo ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed short
-vec_mulo(vector signed char __a, vector signed char __b) {
-  return __builtin_s390_vmob(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vmlob(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_mulo(vector signed short __a, vector signed short __b) {
-  return __builtin_s390_vmoh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vmloh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_mulo(vector signed int __a, vector signed int __b) {
-  return __builtin_s390_vmof(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vmlof(__a, __b);
-}
-
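-/* Usage sketch (illustrative only, not upstream code): vec_mule/vec_mulo
- * return the full double-width products of the even- and odd-indexed
- * elements, so all sixteen byte products of two char vectors can be
- * recovered as two short vectors:
- *
- *   vector signed short even = vec_mule(a, b);   // lanes 0, 2, 4, ...
- *   vector signed short odd  = vec_mulo(a, b);   // lanes 1, 3, 5, ...
- */
-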
-/*-- vec_msum_u128 ----------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-#define vec_msum_u128(X, Y, Z, W) \
-  ((vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)))
-#endif
-
-/*-- vec_sub_u128 -----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsq(__a, __b);
-}
-
-/*-- vec_subc ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_subc(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vscbib(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_subc(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vscbih(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vscbif(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_subc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vscbig(__a, __b);
-}
-
-/*-- vec_subc_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vscbiq(__a, __b);
-}
-
-/*-- vec_sube_u128 ----------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return __builtin_s390_vsbiq(__a, __b, __c);
-}
-
-/*-- vec_subec_u128 ---------------------------------------------------------*/
-
-static inline __ATTRS_ai vector unsigned char
-vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return __builtin_s390_vsbcbiq(__a, __b, __c);
-}
-
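-/* Usage sketch (illustrative only, not upstream code): a 256-bit subtraction
- * chained through the borrow intrinsics above, each 128-bit half held in a
- * vector unsigned char. Note the z/Architecture convention: the borrow
- * indication is 1 when no borrow occurred and 0 when one did.
- *
- *   vector unsigned char lo = vec_sub_u128(a_lo, b_lo);
- *   vector unsigned char bi = vec_subc_u128(a_lo, b_lo);
- *   vector unsigned char hi = vec_sube_u128(a_hi, b_hi, bi);
- */
-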
-/*-- vec_sum2 ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vsumgh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_sum2(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vsumgf(__a, __b);
-}
-
-/*-- vec_sum_u128 -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vsumqf(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vsumqg(__a, __b);
-}
-
-/*-- vec_sum4 ---------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vsumb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_sum4(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vsumh(__a, __b);
-}
-
-/*-- vec_test_mask ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed char __a, vector unsigned char __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vtm(__a, __b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed short __a, vector unsigned short __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed int __a, vector unsigned int __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector signed long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai int
-vec_test_mask(vector float __a, vector unsigned int __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-#endif
-
-static inline __ATTRS_o_ai int
-vec_test_mask(vector double __a, vector unsigned long long __b) {
-  return __builtin_s390_vtm((vector unsigned char)__a,
-                            (vector unsigned char)__b);
-}
-
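-/* Usage sketch (illustrative only, not upstream code): vec_test_mask returns
- * the VTM condition code: 0 when every bit of __a selected by __b is zero,
- * 3 when all selected bits are one, and 1 for a mixture. E.g. a sign-bit
- * test over an assumed vector float x (the float overload needs
- * __ARCH__ >= 12):
- *
- *   vector unsigned int sign = (vector unsigned int){0x80000000, 0x80000000,
- *                                                    0x80000000, 0x80000000};
- *   int cc = vec_test_mask(x, sign);  // 0: none negative, 3: all negative
- */
-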
-/*-- vec_madd ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_madd(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfmasb(__a, __b, __c);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_madd(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfmadb(__a, __b, __c);
-}
-
-/*-- vec_msub ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_msub(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfmssb(__a, __b, __c);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_msub(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfmsdb(__a, __b, __c);
-}
-
-/*-- vec_nmadd --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmadd(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfnmasb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_nmadd(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfnmadb(__a, __b, __c);
-}
-#endif
-
-/*-- vec_nmsub --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_nmsub(vector float __a, vector float __b, vector float __c) {
-  return __builtin_s390_vfnmssb(__a, __b, __c);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_nmsub(vector double __a, vector double __b, vector double __c) {
-  return __builtin_s390_vfnmsdb(__a, __b, __c);
-}
-#endif
-
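-/* Summary sketch (illustrative only): per lane and with a single rounding
- * step,
- *   vec_madd(a, b, c)  ==  a * b + c
- *   vec_msub(a, b, c)  ==  a * b - c
- *   vec_nmadd(a, b, c) == -(a * b + c)
- *   vec_nmsub(a, b, c) == -(a * b - c)
- * A typical use is a fused dot-product step: acc = vec_madd(x, y, acc);
- */
-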
-/*-- vec_sqrt ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_sqrt(vector float __a) {
-  return __builtin_s390_vfsqsb(__a);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_sqrt(vector double __a) {
-  return __builtin_s390_vfsqdb(__a);
-}
-
-/*-- vec_ld2f ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_ai vector double
-vec_ld2f(const float *__ptr) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double);
-}
-
-/*-- vec_st2f ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_ai void
-vec_st2f(vector double __a, float *__ptr) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
-}
-
-/*-- vec_ctd ----------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector signed long long __a, int __b)
-  __constant_range(__b, 0, 31) {
-  vector double __conv = __builtin_convertvector(__a, vector double);
-  __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
-  return __conv;
-}
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector double
-vec_ctd(vector unsigned long long __a, int __b)
-  __constant_range(__b, 0, 31) {
-  vector double __conv = __builtin_convertvector(__a, vector double);
-  __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52);
-  return __conv;
-}
-
-/*-- vec_ctsl ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector signed long long
-vec_ctsl(vector double __a, int __b)
-  __constant_range(__b, 0, 31) {
-  __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-/*-- vec_ctul ---------------------------------------------------------------*/
-
-// This prototype is deprecated.
-static inline __ATTRS_o_ai vector unsigned long long
-vec_ctul(vector double __a, int __b)
-  __constant_range(__b, 0, 31) {
-  __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52);
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
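-/* Note on the scaling constant above: a double whose exponent field is
- * (0x3ff - __b) or (0x3ff + __b) with a zero mantissa is exactly 2**-__b or
- * 2**__b (0x3ff being the IEEE-754 exponent bias), so the shift by 52 builds
- * the scale factor directly from its bit pattern and the multiply applies it
- * without any library call.
- */
-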
-/*-- vec_doublee ------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector double
-vec_doublee(vector float __a) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  __v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2);
-  return __builtin_convertvector(__pack, vector double);
-}
-#endif
-
-/*-- vec_floate -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_ai vector float
-vec_floate(vector double __a) {
-  typedef float __v2f32 __attribute__((__vector_size__(8)));
-  __v2f32 __pack = __builtin_convertvector(__a, __v2f32);
-  return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1);
-}
-#endif
-
-/*-- vec_double -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector double
-vec_double(vector signed long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static inline __ATTRS_o_ai vector double
-vec_double(vector unsigned long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-/*-- vec_float --------------------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-static inline __ATTRS_o_ai vector float
-vec_float(vector signed int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-static inline __ATTRS_o_ai vector float
-vec_float(vector unsigned int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-#endif
-
-/*-- vec_signed -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed long long
-vec_signed(vector double __a) {
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector signed int
-vec_signed(vector float __a) {
-  return __builtin_convertvector(__a, vector signed int);
-}
-#endif
-
-/*-- vec_unsigned -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_unsigned(vector double __a) {
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-#if __ARCH__ >= 13
-static inline __ATTRS_o_ai vector unsigned int
-vec_unsigned(vector float __a) {
-  return __builtin_convertvector(__a, vector unsigned int);
-}
-#endif
-
-/*-- vec_roundp -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundp(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 6);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundp(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 6);
-}
-
-/*-- vec_ceil ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_ceil(vector float __a) {
-  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 4, 6);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_ceil(vector double __a) {
-  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 4, 6);
-}
-
-/*-- vec_roundm -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundm(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 7);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundm(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 7);
-}
-
-/*-- vec_floor --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_floor(vector float __a) {
-  // On this platform, vec_floor never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 4, 7);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_floor(vector double __a) {
-  // On this platform, vec_floor never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 4, 7);
-}
-
-/*-- vec_roundz -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundz(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 5);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundz(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 5);
-}
-
-/*-- vec_trunc --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_trunc(vector float __a) {
-  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 4, 5);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_trunc(vector double __a) {
-  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 4, 5);
-}
-
-/*-- vec_roundc -------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_roundc(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_roundc(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 0);
-}
-
-/*-- vec_rint ---------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_rint(vector float __a) {
-  // vec_rint may trigger the IEEE-inexact exception.
-  return __builtin_s390_vfisb(__a, 0, 0);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_rint(vector double __a) {
-  // vec_rint may trigger the IEEE-inexact exception.
-  return __builtin_s390_vfidb(__a, 0, 0);
-}
-
-/*-- vec_round --------------------------------------------------------------*/
-
-#if __ARCH__ >= 12
-static inline __ATTRS_o_ai vector float
-vec_round(vector float __a) {
-  return __builtin_s390_vfisb(__a, 4, 4);
-}
-#endif
-
-static inline __ATTRS_o_ai vector double
-vec_round(vector double __a) {
-  return __builtin_s390_vfidb(__a, 4, 4);
-}
-
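-/* Note on the immediates used above (as the surrounding definitions imply):
- * the builtins take (value, M4, M5); M4 == 4 suppresses the IEEE-inexact
- * exception while M4 == 0 (vec_rint) leaves it observable, and M5 selects
- * the rounding method: 0 = current FPC mode, 4 = to nearest,
- * 5 = toward zero, 6 = toward +infinity, 7 = toward -infinity.
- */
-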
-/*-- vec_fp_test_data_class -------------------------------------------------*/
-
-#if __ARCH__ >= 12
-extern __ATTRS_o vector bool int
-vec_fp_test_data_class(vector float __a, int __b, int *__c)
-  __constant_range(__b, 0, 4095);
-
-extern __ATTRS_o vector bool long long
-vec_fp_test_data_class(vector double __a, int __b, int *__c)
-  __constant_range(__b, 0, 4095);
-
-#define vec_fp_test_data_class(X, Y, Z) \
-  ((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \
-   __extension__ ({ \
-     vector unsigned char __res; \
-     vector unsigned char __x = (vector unsigned char)(X); \
-     int *__z = (Z); \
-     switch (sizeof ((X)[0])) { \
-     case 4:  __res = (vector unsigned char) \
-                      __builtin_s390_vftcisb((vector float)__x, (Y), __z); \
-              break; \
-     default: __res = (vector unsigned char) \
-                      __builtin_s390_vftcidb((vector double)__x, (Y), __z); \
-              break; \
-     } __res; }))
-#else
-#define vec_fp_test_data_class(X, Y, Z) \
-  ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
-#endif
-
-#define __VEC_CLASS_FP_ZERO_P (1 << 11)
-#define __VEC_CLASS_FP_ZERO_N (1 << 10)
-#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | __VEC_CLASS_FP_ZERO_N)
-#define __VEC_CLASS_FP_NORMAL_P (1 << 9)
-#define __VEC_CLASS_FP_NORMAL_N (1 << 8)
-#define __VEC_CLASS_FP_NORMAL (__VEC_CLASS_FP_NORMAL_P | \
-                               __VEC_CLASS_FP_NORMAL_N)
-#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 7)
-#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 6)
-#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
-                                  __VEC_CLASS_FP_SUBNORMAL_N)
-#define __VEC_CLASS_FP_INFINITY_P (1 << 5)
-#define __VEC_CLASS_FP_INFINITY_N (1 << 4)
-#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \
-                                 __VEC_CLASS_FP_INFINITY_N)
-#define __VEC_CLASS_FP_QNAN_P (1 << 3)
-#define __VEC_CLASS_FP_QNAN_N (1 << 2)
-#define __VEC_CLASS_FP_QNAN (__VEC_CLASS_FP_QNAN_P | __VEC_CLASS_FP_QNAN_N)
-#define __VEC_CLASS_FP_SNAN_P (1 << 1)
-#define __VEC_CLASS_FP_SNAN_N (1 << 0)
-#define __VEC_CLASS_FP_SNAN (__VEC_CLASS_FP_SNAN_P | __VEC_CLASS_FP_SNAN_N)
-#define __VEC_CLASS_FP_NAN (__VEC_CLASS_FP_QNAN | __VEC_CLASS_FP_SNAN)
-#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \
-                                   __VEC_CLASS_FP_SUBNORMAL | \
-                                   __VEC_CLASS_FP_ZERO | \
-                                   __VEC_CLASS_FP_INFINITY)
-
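-/* Usage sketch (illustrative only, not upstream code): combining the class
- * masks above for a per-lane NaN test on an assumed vector double v, with
- * the condition code summarizing the outcome (0: all lanes match, 3: no
- * lane matches):
- *
- *   int cc;
- *   vector bool long long is_nan =
- *       vec_fp_test_data_class(v, __VEC_CLASS_FP_NAN, &cc);
- */
-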
-/*-- vec_cp_until_zero ------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero(vector signed char __a) {
-  return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero(vector bool char __a) {
-  return (vector bool char)__builtin_s390_vistrb((vector unsigned char)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero(vector unsigned char __a) {
-  return __builtin_s390_vistrb(__a);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero(vector signed short __a) {
-  return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero(vector bool short __a) {
-  return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero(vector unsigned short __a) {
-  return __builtin_s390_vistrh(__a);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero(vector signed int __a) {
-  return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero(vector bool int __a) {
-  return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero(vector unsigned int __a) {
-  return __builtin_s390_vistrf(__a);
-}
-
-/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cp_until_zero_cc(vector signed char __a, int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_cp_until_zero_cc(vector bool char __a, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vistrbs((vector unsigned char)__a, __cc);
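-/* Usage sketch (illustrative only, not upstream code): a 256-bit addition
- * chained through the carry intrinsics above, each 128-bit half held in a
- * vector unsigned char (vec_add_u128 is defined earlier in this header):
- *
- *   vector unsigned char lo    = vec_add_u128(a_lo, b_lo);
- *   vector unsigned char carry = vec_addc_u128(a_lo, b_lo);
- *   vector unsigned char hi    = vec_adde_u128(a_hi, b_hi, carry);
- */
-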
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) {
-  return __builtin_s390_vistrbs(__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cp_until_zero_cc(vector signed short __a, int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cp_until_zero_cc(vector bool short __a, int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vistrhs((vector unsigned short)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) {
-  return __builtin_s390_vistrhs(__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cp_until_zero_cc(vector signed int __a, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vistrfs((vector unsigned int)__a, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cp_until_zero_cc(vector bool int __a, int *__cc) {
-  return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a,
-                                                 __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) {
-  return __builtin_s390_vistrfs(__a, __cc);
-}
-
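-/* Usage sketch (illustrative only, not upstream code): vec_cp_until_zero
- * (VISTR) copies elements up to and including the first zero element and
- * forces every later element to zero, which normalizes NUL-terminated
- * string fragments; the _cc variants additionally report through *__cc
- * whether a zero element was found:
- *
- *   vector unsigned char clean = vec_cp_until_zero(frag);
- *   // frag being some assumed vector unsigned char
- */
-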
-/*-- vec_cmpeq_idx ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfeeb((vector unsigned char)__a,
-                         (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfeeb((vector unsigned char)__a,
-                              (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfeeb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfeeh((vector unsigned short)__a,
-                         (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfeeh((vector unsigned short)__a,
-                              (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfeeh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfeef((vector unsigned int)__a,
-                         (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfeef((vector unsigned int)__a,
-                              (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfeef(__a, __b);
-}
-
-/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfeebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfeebs((vector unsigned char)__a,
-                               (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                 int *__cc) {
-  return __builtin_s390_vfeebs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfeehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfeehs((vector unsigned short)__a,
-                               (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                 int *__cc) {
-  return __builtin_s390_vfeehs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfeefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfeefs((vector unsigned int)__a,
-                               (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vfeefs(__a, __b, __cc);
-}
-
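-/* Usage sketch (illustrative only, not upstream code): the _idx forms place
- * the byte index of the first matching position in byte element 7 of the
- * result (16 when nothing matches), so a scalar index can be pulled out with
- * vec_extract, defined earlier in this header:
- *
- *   vector unsigned char idx = vec_cmpeq_idx(a, b);
- *   int first = vec_extract(idx, 7);   // 0..15, or 16 if no element equal
- */
-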
-/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfeezb((vector unsigned char)__a,
-                          (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfeezb((vector unsigned char)__a,
-                               (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfeezb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfeezh((vector unsigned short)__a,
-                          (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfeezh((vector unsigned short)__a,
-                               (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfeezh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfeezf((vector unsigned int)__a,
-                          (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfeezf((vector unsigned int)__a,
-                               (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfeezf(__a, __b);
-}
-
-/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                      int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfeezbs((vector unsigned char)__a,
-                           (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfeezbs((vector unsigned char)__a,
-                                (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                      int *__cc) {
-  return __builtin_s390_vfeezbs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                      int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfeezhs((vector unsigned short)__a,
-                           (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfeezhs((vector unsigned short)__a,
-                                (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                      int *__cc) {
-  return __builtin_s390_vfeezhs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfeezfs((vector unsigned int)__a,
-                           (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfeezfs((vector unsigned int)__a,
-                                (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                      int *__cc) {
-  return __builtin_s390_vfeezfs(__a, __b, __cc);
-}
-
-/*-- vec_cmpne_idx ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfeneb((vector unsigned char)__a,
-                          (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfeneb((vector unsigned char)__a,
-                               (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfeneb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfeneh((vector unsigned short)__a,
-                          (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfeneh((vector unsigned short)__a,
-                               (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfeneh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfenef((vector unsigned int)__a,
-                          (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfenef((vector unsigned int)__a,
-                               (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfenef(__a, __b);
-}
-
-/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfenebs((vector unsigned char)__a,
-                           (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfenebs((vector unsigned char)__a,
-                                (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                 int *__cc) {
-  return __builtin_s390_vfenebs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfenehs((vector unsigned short)__a,
-                           (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfenehs((vector unsigned short)__a,
-                                (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                 int *__cc) {
-  return __builtin_s390_vfenehs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfenefs((vector unsigned int)__a,
-                           (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfenefs((vector unsigned int)__a,
-                                (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
-  return __builtin_s390_vfenefs(__a, __b, __cc);
-}
-
-/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfenezb((vector unsigned char)__a,
-                           (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfenezb((vector unsigned char)__a,
-                                (vector unsigned char)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfenezb(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfenezh((vector unsigned short)__a,
-                           (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfenezh((vector unsigned short)__a,
-                                (vector unsigned short)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfenezh(__a, __b);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfenezf((vector unsigned int)__a,
-                           (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfenezf((vector unsigned int)__a,
-                                (vector unsigned int)__b);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfenezf(__a, __b);
-}
-
-/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                      int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfenezbs((vector unsigned char)__a,
-                            (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfenezbs((vector unsigned char)__a,
-                                 (vector unsigned char)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                      int *__cc) {
-  return __builtin_s390_vfenezbs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                      int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfenezhs((vector unsigned short)__a,
-                            (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return __builtin_s390_vfenezhs((vector unsigned short)__a,
-                                 (vector unsigned short)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                      int *__cc) {
-  return __builtin_s390_vfenezhs(__a, __b, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfenezfs((vector unsigned int)__a,
-                            (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfenezfs((vector unsigned int)__a,
-                                 (vector unsigned int)__b, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                      int *__cc) {
-  return __builtin_s390_vfenezfs(__a, __b, __cc);
-}
-
-/*-- vec_cmprg --------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
-}
-
-/*-- vec_cmprg_cc -----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b,
-             vector unsigned char __c, int *__cc) {
-  return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmprg_cc(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned short __c, int *__cc) {
-  return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b,
-             vector unsigned int __c, int *__cc) {
-  return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
-}
-
-/*-- vec_cmprg_idx ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c) {
-  return __builtin_s390_vstrcb(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c) {
-  return __builtin_s390_vstrch(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b,
-              vector unsigned int __c) {
-  return __builtin_s390_vstrcf(__a, __b, __c, 0);
-}
-
-/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                 vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                 vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                 vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
-}
-
-/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
-                   vector unsigned char __c) {
-  return __builtin_s390_vstrczb(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
-                   vector unsigned short __c) {
-  return __builtin_s390_vstrczh(__a, __b, __c, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
-                   vector unsigned int __c) {
-  return __builtin_s390_vstrczf(__a, __b, __c, 0);
-}
-
-/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                      vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                      vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                      vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
-}
-
-/*-- vec_cmpnrg -------------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg(vector unsigned char __a, vector unsigned char __b,
-           vector unsigned char __c) {
-  return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg(vector unsigned short __a, vector unsigned short __b,
-           vector unsigned short __c) {
-  return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg(vector unsigned int __a, vector unsigned int __b,
-           vector unsigned int __c) {
-  return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
-}
-
-/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b,
-              vector unsigned char __c, int *__cc) {
-  return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c, int *__cc) {
-  return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b,
-              vector unsigned int __c, int *__cc) {
-  return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
-}
-
-/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b,
-               vector unsigned char __c) {
-  return __builtin_s390_vstrcb(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b,
-               vector unsigned short __c) {
-  return __builtin_s390_vstrch(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b,
-               vector unsigned int __c) {
-  return __builtin_s390_vstrcf(__a, __b, __c, 8);
-}
-
-/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                  vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                  vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                  vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
-}
-
-/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b,
-                    vector unsigned char __c) {
-  return __builtin_s390_vstrczb(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b,
-                    vector unsigned short __c) {
-  return __builtin_s390_vstrczh(__a, __b, __c, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b,
-                    vector unsigned int __c) {
-  return __builtin_s390_vstrczf(__a, __b, __c, 8);
-}
-
-/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                       vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                       vector unsigned short __c, int *__cc) {
-  return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                       vector unsigned int __c, int *__cc) {
-  return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
-}
-
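For orientation, the range-compare overloads above take boundary values pairwise in __b and per-element control bytes in __c. A minimal sketch of the negated form, assuming -fzvector on s390x and the usual VSTRC control encoding (0x80 = equal, 0x40 = less, 0x20 = greater, so 0xA0 means >= and 0xC0 means <=); the sample range is hypothetical:

#include <vecintrin.h>

/* Flags each byte of `text` that lies OUTSIDE the range '0'..'9'.
 * Bounds come in even/odd pairs of `bounds`; `ctrl` says how each bound
 * is compared (0xA0 = greater-or-equal, 0xC0 = less-or-equal). Unused
 * pairs with a zero control byte never match. */
static vector bool char non_digits(vector unsigned char text) {
  const vector unsigned char bounds =
      {'0', '9', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  const vector unsigned char ctrl =
      {0xA0, 0xC0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  return vec_cmpnrg(text, bounds, ctrl);
}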
-/*-- vec_find_any_eq --------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector signed short __a, vector signed short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector signed int __a, vector signed int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector bool int __a, vector bool int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 4);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vfaef(__a, __b, 4);
-}
-
-/*-- vec_find_any_eq_cc -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b,
-                   int *__cc) {
-  return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector signed short __a, vector signed short __b,
-                   int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b,
-                   int *__cc) {
-  return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 4, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b,
-                   int *__cc) {
-  return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
-}
-
-/*-- vec_find_any_eq_idx ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaeb((vector unsigned char)__a,
-                              (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaeb(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaeh((vector unsigned short)__a,
-                              (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaeh(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaef((vector unsigned int)__a,
-                              (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaef(__a, __b, 0);
-}
-
-/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_idx_cc(vector signed char __a, vector signed char __b,
-                       int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfaebs((vector unsigned char)__a,
-                               (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                       int *__cc) {
-  return __builtin_s390_vfaebs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b,
-                       int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs((vector unsigned short)__a,
-                               (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b,
-                       int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfaefs((vector unsigned int)__a,
-                               (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                       int *__cc) {
-  return __builtin_s390_vfaefs(__a, __b, 0, __cc);
-}
-
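A minimal usage sketch for the _idx forms above, assuming -fzvector on s390x: byte element 7 of the result carries the index of the first match, or 16 when nothing matched; the helper name is hypothetical:

#include <vecintrin.h>

/* Index of the first byte of `text` equal to any byte of `set` (16 = none). */
static unsigned first_match(vector unsigned char text,
                            vector unsigned char set) {
  vector unsigned char idx = vec_find_any_eq_idx(text, set);
  return idx[7];  /* GCC/Clang vector subscripting under -fzvector */
}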
-/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaezb((vector unsigned char)__a,
-                          (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaezb((vector unsigned char)__a,
-                               (vector unsigned char)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaezb(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaezh((vector unsigned short)__a,
-                          (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaezh((vector unsigned short)__a,
-                               (vector unsigned short)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaezh(__a, __b, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaezf((vector unsigned int)__a,
-                          (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaezf((vector unsigned int)__a,
-                               (vector unsigned int)__b, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaezf(__a, __b, 0);
-}
-
-/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                            int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaezbs((vector unsigned char)__a,
-                           (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs((vector unsigned char)__a,
-                                (vector unsigned char)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                            int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaezhs((vector unsigned short)__a,
-                           (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezhs((vector unsigned short)__a,
-                                (vector unsigned short)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_eq_or_0_idx_cc(vector unsigned short __a,
-                            vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b,
-                            int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaezfs((vector unsigned int)__a,
-                           (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs((vector unsigned int)__a,
-                                (vector unsigned int)__b, 0, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
-}
-
-/*-- vec_find_any_ne --------------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector signed char __a, vector signed char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector bool char __a, vector bool char __b) {
-  return (vector bool char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector signed short __a, vector signed short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector bool short __a, vector bool short __b) {
-  return (vector bool short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector signed int __a, vector signed int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector bool int __a, vector bool int __b) {
-  return (vector bool int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 12);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_s390_vfaef(__a, __b, 12);
-}
-
-/*-- vec_find_any_ne_cc -----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return (vector bool char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool char
-vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b,
-                   int *__cc) {
-  return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector signed short __a, vector signed short __b,
-                   int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) {
-  return (vector bool short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool short
-vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b,
-                   int *__cc) {
-  return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return (vector bool int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 12, __cc);
-}
-
-static inline __ATTRS_o_ai vector bool int
-vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b,
-                   int *__cc) {
-  return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
-}
-
-/*-- vec_find_any_ne_idx ----------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaeb((vector unsigned char)__a,
-                         (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaeb((vector unsigned char)__a,
-                              (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaeb(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaeh((vector unsigned short)__a,
-                         (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaeh((vector unsigned short)__a,
-                              (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaeh(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaef((vector unsigned int)__a,
-                         (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaef((vector unsigned int)__a,
-                              (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaef(__a, __b, 8);
-}
-
-/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b,
-                       int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaebs((vector unsigned char)__a,
-                          (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) {
-  return __builtin_s390_vfaebs((vector unsigned char)__a,
-                               (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                       int *__cc) {
-  return __builtin_s390_vfaebs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b,
-                       int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaehs((vector unsigned short)__a,
-                          (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs((vector unsigned short)__a,
-                               (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b,
-                       int *__cc) {
-  return __builtin_s390_vfaehs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b,
-                       int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaefs((vector unsigned int)__a,
-                          (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) {
-  return __builtin_s390_vfaefs((vector unsigned int)__a,
-                               (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                       int *__cc) {
-  return __builtin_s390_vfaefs(__a, __b, 8, __cc);
-}
-
-/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) {
-  return (vector signed char)
-    __builtin_s390_vfaezb((vector unsigned char)__a,
-                          (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) {
-  return __builtin_s390_vfaezb((vector unsigned char)__a,
-                               (vector unsigned char)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_s390_vfaezb(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) {
-  return (vector signed short)
-    __builtin_s390_vfaezh((vector unsigned short)__a,
-                          (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) {
-  return __builtin_s390_vfaezh((vector unsigned short)__a,
-                               (vector unsigned short)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_s390_vfaezh(__a, __b, 8);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int __b) {
-  return (vector signed int)
-    __builtin_s390_vfaezf((vector unsigned int)__a,
-                          (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) {
-  return __builtin_s390_vfaezf((vector unsigned int)__a,
-                               (vector unsigned int)__b, 8);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_s390_vfaezf(__a, __b, 8);
-}
-
-/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
-
-static inline __ATTRS_o_ai vector signed char
-vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b,
-                            int *__cc) {
-  return (vector signed char)
-    __builtin_s390_vfaezbs((vector unsigned char)__a,
-                           (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs((vector unsigned char)__a,
-                                (vector unsigned char)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b,
-                            int *__cc) {
-  return (vector signed short)
-    __builtin_s390_vfaezhs((vector unsigned short)__a,
-                           (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezhs((vector unsigned short)__a,
-                                (vector unsigned short)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_find_any_ne_or_0_idx_cc(vector unsigned short __a,
-                            vector unsigned short __b, int *__cc) {
-  return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector signed int
-vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b,
-                            int *__cc) {
-  return (vector signed int)
-    __builtin_s390_vfaezfs((vector unsigned int)__a,
-                           (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs((vector unsigned int)__a,
-                                (vector unsigned int)__b, 8, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b,
-                            int *__cc) {
-  return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
-}
-
-/*-- vec_search_string_cc ---------------------------------------------------*/
-
-#if __ARCH__ >= 13
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed char __a, vector signed char __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsb((vector unsigned char)__a,
-                               (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool char __a, vector bool char __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsb((vector unsigned char)__a,
-                               (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned char __a, vector unsigned char __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsb(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed short __a, vector signed short __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsh((vector unsigned short)__a,
-                               (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool short __a, vector bool short __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsh((vector unsigned short)__a,
-                               (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned short __a, vector unsigned short __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsh(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector signed int __a, vector signed int __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsf((vector unsigned int)__a,
-                               (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector bool int __a, vector bool int __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsf((vector unsigned int)__a,
-                               (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_cc(vector unsigned int __a, vector unsigned int __b,
-                     vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrsf(__a, __b, __c, __cc);
-}
-
-#endif
-
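The substring-search overloads above are gated on arch13. A minimal sketch, assuming -march=arch13 (z15) and that, per the z/Architecture definition, byte element 7 of the third operand carries the needle length while byte element 7 of the result carries the match position; interpreting the condition code is left to the Principles of Operation, and the data is hypothetical:

#include <vecintrin.h>

/* Searches for a 5-byte needle inside one 16-byte haystack chunk. */
static unsigned find_quick(vector unsigned char hay, int *cc) {
  const vector unsigned char needle =
      {'q', 'u', 'i', 'c', 'k', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  const vector unsigned char len =
      {0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0};  /* length in byte 7 */
  vector unsigned char pos = vec_search_string_cc(hay, needle, len, cc);
  return pos[7];  /* start index of the (possibly partial) match */
}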
-/*-- vec_search_string_until_zero_cc ----------------------------------------*/
-
-#if __ARCH__ >= 13
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed char __a,
-                                vector signed char __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszb((vector unsigned char)__a,
-                                (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool char __a,
-                                vector bool char __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszb((vector unsigned char)__a,
-                                (vector unsigned char)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned char __a,
-                                vector unsigned char __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszb(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed short __a,
-                                vector signed short __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszh((vector unsigned short)__a,
-                                (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool short __a,
-                                vector bool short __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszh((vector unsigned short)__a,
-                                (vector unsigned short)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned short __a,
-                                vector unsigned short __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszh(__a, __b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector signed int __a,
-                                vector signed int __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszf((vector unsigned int)__a,
-                                (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector bool int __a,
-                                vector bool int __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszf((vector unsigned int)__a,
-                                (vector unsigned int)__b, __c, __cc);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_search_string_until_zero_cc(vector unsigned int __a,
-                                vector unsigned int __b,
-                                vector unsigned char __c, int *__cc) {
-  return __builtin_s390_vstrszf(__a, __b, __c, __cc);
-}
-
-#endif
-
-#undef __constant_pow2_range
-#undef __constant_range
-#undef __constant
-#undef __ATTRS_o
-#undef __ATTRS_o_ai
-#undef __ATTRS_ai
-
-#else
-
-#error "Use -fzvector to enable vector extensions"
-
-#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/x86intrin.h b/linux-x86/lib64/clang/11.0.1/include/x86intrin.h
deleted file mode 100644
index a8b3662..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/x86intrin.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86INTRIN_H
-#define __X86INTRIN_H
-
-#include <ia32intrin.h>
-
-#include <immintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__)
-#include <mm3dnow.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
-#include <prfchwintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__)
-#include <ammintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__)
-#include <fma4intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__)
-#include <xopintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__)
-#include <tbmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LWP__)
-#include <lwpintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
-#include <mwaitxintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLZERO__)
-#include <clzerointrin.h>
-#endif
-
-#endif /* __X86INTRIN_H */
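x86intrin.h above is purely an umbrella: ia32intrin.h and immintrin.h come in unconditionally, and each AMD extension header only when the corresponding -m flag defined its feature macro. A user-side sketch of the same gating; both intrinsics are real declarations from the included sub-headers, and the helper names are hypothetical:

#include <x86intrin.h>

unsigned long long stamp(void) {
  return __rdtsc();        /* from ia32intrin.h, always pulled in */
}

#if defined(__XOP__)       /* set by -mxop, so <xopintrin.h> was included */
__m128i rot8(__m128i v) { return _mm_roti_epi32(v, 8); }
#endif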
diff --git a/linux-x86/lib64/clang/11.0.1/include/xmmintrin.h b/linux-x86/lib64/clang/11.0.1/include/xmmintrin.h
deleted file mode 100644
index 9b8de63..0000000
--- a/linux-x86/lib64/clang/11.0.1/include/xmmintrin.h
+++ /dev/null
@@ -1,3008 +0,0 @@
-/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __XMMINTRIN_H
-#define __XMMINTRIN_H
-
-#include <mmintrin.h>
-
-typedef int __v4si __attribute__((__vector_size__(16)));
-typedef float __v4sf __attribute__((__vector_size__(16)));
-typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));
-
-/* Unsigned types */
-typedef unsigned int __v4su __attribute__((__vector_size__(16)));
-
-/* This header should only be included in a hosted environment as it depends on
- * a standard library to provide allocation routines. */
-#if __STDC_HOSTED__
-#include <mm_malloc.h>
-#endif
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse"), __min_vector_width__(64)))
-
-/// Adds the 32-bit float values in the low-order bits of the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSS / ADDSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum
-///    of the lower 32 bits of both operands. The upper 96 bits are copied from
-///    the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_add_ss(__m128 __a, __m128 __b)
-{
-  __a[0] += __b[0];
-  return __a;
-}
-
-/// Adds two 128-bit vectors of [4 x float], and returns the results of
-///    the addition.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPS / ADDPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the sums of both
-///    operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_add_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a + (__v4sf)__b);
-}
-
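A short sketch contrasting the two shapes above: _mm_add_ps adds all four lanes, while _mm_add_ss touches only lane 0 and copies the other three lanes from __a. The values are illustrative; _mm_set_ps and _mm_set1_ps are declared later in this header:

#include <xmmintrin.h>

void add_demo(void) {
  __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  /* lanes 0..3 = 1,2,3,4 */
  __m128 b = _mm_set1_ps(10.0f);
  __m128 packed = _mm_add_ps(a, b);  /* 11, 12, 13, 14 */
  __m128 scalar = _mm_add_ss(a, b);  /* 11,  2,  3,  4 */
  (void)packed; (void)scalar;
}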
-/// Subtracts the 32-bit float value in the low-order bits of the second
-///    operand from the corresponding value in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBSS / SUBSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits
-///    of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the subtrahend. The lower 32
-///    bits of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    difference of the lower 32 bits of both operands. The upper 96 bits are
-///    copied from the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sub_ss(__m128 __a, __m128 __b)
-{
-  __a[0] -= __b[0];
-  return __a;
-}
-
-/// Subtracts each of the values of the second operand from the first
-///    operand, both of which are 128-bit vectors of [4 x float] and returns
-///    the results of the subtraction.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPS / SUBPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the subtrahend.
-/// \returns A 128-bit vector of [4 x float] containing the differences between
-///    both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sub_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a - (__v4sf)__b);
-}
-
-/// Multiplies two 32-bit float values in the low-order bits of the
-///    operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULSS / MULSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-///    The lower 32 bits of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the product of the lower
-///    32 bits of both operands. The upper 96 bits are copied from the upper 96
-///    bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mul_ss(__m128 __a, __m128 __b)
-{
-  __a[0] *= __b[0];
-  return __a;
-}
-
-/// Multiplies two 128-bit vectors of [4 x float] and returns the
-///    results of the multiplication.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPS / MULPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the products of both
-///    operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mul_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a * (__v4sf)__b);
-}
-
-/// Divides the value in the low-order 32 bits of the first operand by
-///    the corresponding value in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVSS / DIVSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the dividend. The lower 32
-///    bits of this operand are used in the calculation.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits
-///    of this operand are used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the quotients of the
-///    lower 32 bits of both operands. The upper 96 bits are copied from the
-///    upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_div_ss(__m128 __a, __m128 __b)
-{
-  __a[0] /= __b[0];
-  return __a;
-}
-
-/// Divides two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPS / DIVPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the divisor.
-/// \returns A 128-bit vector of [4 x float] containing the quotients of both
-///    operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_div_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4sf)__a / (__v4sf)__b);
-}
-
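Combining the arithmetic intrinsics above, a four-wide linear interpolation falls out directly (a hypothetical helper, not part of the header):

#include <xmmintrin.h>

/* lerp(a, b, t) = a + t*(b - a), computed on all four lanes at once. */
static __m128 lerp4(__m128 a, __m128 b, __m128 t) {
  return _mm_add_ps(a, _mm_mul_ps(t, _mm_sub_ps(b, a)));
}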
-/// Calculates the square root of the value stored in the low-order bits
-///    of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTSS / SQRTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the square root of the
-///    value in the low-order bits of the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sqrt_ss(__m128 __a)
-{
-  return (__m128)__builtin_ia32_sqrtss((__v4sf)__a);
-}
-
-/// Calculates the square roots of the values stored in a 128-bit vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPS / SQRTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the square roots of the
-///    values in the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_sqrt_ps(__m128 __a)
-{
-  return __builtin_ia32_sqrtps((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocal of the value stored in the
-///    low-order bits of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRCPSS / RCPSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocal of the value in the low-order bits of the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rcp_ss(__m128 __a)
-{
-  return (__m128)__builtin_ia32_rcpss((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocals of the values stored in a
-///    128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRCPPS / RCPPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocals of the values in the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rcp_ps(__m128 __a)
-{
-  return (__m128)__builtin_ia32_rcpps((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocal of the square root of the value
-///    stored in the low-order bits of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRSQRTSS / RSQRTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the calculation.
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocal of the square root of the value in the low-order bits of the
-///    operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rsqrt_ss(__m128 __a)
-{
-  return __builtin_ia32_rsqrtss((__v4sf)__a);
-}
-
-/// Calculates the approximate reciprocals of the square roots of the
-///    values stored in a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VRSQRTPS / RSQRTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the approximate
-///    reciprocals of the square roots of the values in the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_rsqrt_ps(__m128 __a)
-{
-  return __builtin_ia32_rsqrtps((__v4sf)__a);
-}
-
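_mm_rcp_ps and _mm_rsqrt_ps are approximations (roughly 12 bits of relative precision), so a common pattern is one Newton-Raphson step to refine the estimate. A sketch for reciprocal square root, using the textbook refinement y' = y*(1.5 - 0.5*a*y*y); the helper name is hypothetical:

#include <xmmintrin.h>

/* Approximate 1/sqrt(a), refined by one Newton-Raphson iteration. */
static __m128 rsqrt_refined(__m128 a) {
  const __m128 half = _mm_set1_ps(0.5f);
  const __m128 three_halves = _mm_set1_ps(1.5f);
  __m128 y = _mm_rsqrt_ps(a);
  __m128 ayy = _mm_mul_ps(_mm_mul_ps(a, y), y);   /* a * y * y */
  return _mm_mul_ps(y, _mm_sub_ps(three_halves, _mm_mul_ps(half, ayy)));
}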
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands and returns the lesser value in the low-order bits of the
-///    vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINSS / MINSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    minimum value between both operands. The upper 96 bits are copied from
-///    the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_min_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 128-bit vectors of [4 x float] and returns the lesser
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPS / MINPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \returns A 128-bit vector of [4 x float] containing the minimum values
-///    between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_min_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands and returns the greater value in the low-order bits of a 128-bit
-///    vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXSS / MAXSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    maximum value between both operands. The upper 96 bits are copied from
-///    the upper 96 bits of the first source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_max_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 128-bit vectors of [4 x float] and returns the greater
-///    of each pair of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPS / MAXPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands.
-/// \returns A 128-bit vector of [4 x float] containing the maximum values
-///    between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_max_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b);
-}
-
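The min/max pair above composes directly into a branchless clamp (a hypothetical helper):

#include <xmmintrin.h>

/* Clamps each lane of v into [lo, hi]; assumes lo <= hi per lane. */
static __m128 clamp4(__m128 v, __m128 lo, __m128 hi) {
  return _mm_min_ps(_mm_max_ps(v, lo), hi);
}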
-/// Performs a bitwise AND of two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDPS / ANDPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector containing one of the source operands.
-/// \param __b
-///    A 128-bit vector containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
-///    values between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_and_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4su)__a & (__v4su)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [4 x float], using
-///    the one's complement of the values contained in the first source
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VANDNPS / ANDNPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the first source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing the second source operand.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the
-///    one's complement of the first operand and the values in the second
-///    operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_andnot_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)(~(__v4su)__a & (__v4su)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VORPS / ORPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the
-///    values between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_or_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4su)__a | (__v4su)__b);
-}
-
-/// Performs a bitwise exclusive OR of two 128-bit vectors of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the source operands.
-/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR
-///    of the values between both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_xor_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)((__v4su)__a ^ (__v4su)__b);
-}
-
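Because these operate on the raw float bit patterns, they give branchless absolute value and negation: ANDNOT clears the sign bit, XOR flips it. Hypothetical helpers; -0.0f is the sign-bit-only constant:

#include <xmmintrin.h>

static __m128 abs4(__m128 v) {
  return _mm_andnot_ps(_mm_set1_ps(-0.0f), v);  /* clear sign bits */
}

static __m128 neg4(__m128 v) {
  return _mm_xor_ps(v, _mm_set1_ps(-0.0f));     /* flip sign bits */
}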
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands for equality and returns the result of the comparison in the
-///    low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQSS / CMPEQSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpeq_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] for equality.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQPS / CMPEQPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpeq_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b);
-}
-
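The comparison results above are lane masks (all-ones or all-zeros per lane), which is exactly what a branchless select needs before SSE4.1's blend instructions. A sketch with a hypothetical helper:

#include <xmmintrin.h>

/* Per lane: (a == b) ? x : y, with no branches. */
static __m128 select_eq(__m128 a, __m128 b, __m128 x, __m128 y) {
  __m128 mask = _mm_cmpeq_ps(a, b);
  return _mm_or_ps(_mm_and_ps(mask, x), _mm_andnot_ps(mask, y));
}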
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is less than the
-///    corresponding value in the second operand and returns the result of the
-///    comparison in the low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSS / CMPLTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmplt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are less than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmplt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is less than or
-///    equal to the corresponding value in the second operand and returns the
-///    result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESS / CMPLESS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmple_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are less than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmple_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is greater than
-///    the corresponding value in the second operand and returns the result of
-///    the comparison in the low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSS / CMPLTSS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpgt_ss(__m128 __a, __m128 __b)
-{
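-  /* __a > __b is computed as __b < __a with the operands swapped; in the
-     shuffle below, index 4 selects lane 0 of that comparison result, while
-     indices 1-3 pass lanes 1-3 of __a through, preserving the upper bits as
-     the scalar-op convention requires. */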
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are greater than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpgt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is greater than
-///    or equal to the corresponding value in the second operand and returns
-///    the result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESS / CMPLESS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpge_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are greater than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpge_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands for inequality and returns the result of the comparison in the
-///    low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQSS / CMPNEQSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpneq_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] for inequality.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQPS / CMPNEQPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpneq_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not less than
-///    the corresponding value in the second operand and returns the result of
-///    the comparison in the low-order bits of a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSS / CMPNLTSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnlt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not less than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnlt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not less than
-///    or equal to the corresponding value in the second operand and returns
-///    the result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESS / CMPNLESS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnle_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not less than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnle_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not greater
-///    than the corresponding value in the second operand and returns the
-///    result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSS / CMPNLTSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpngt_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not greater than those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpngt_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is not greater
-///    than or equal to the corresponding value in the second operand and
-///    returns the result of the comparison in the low-order bits of a vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESS / CMPNLESS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnge_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_shufflevector((__v4sf)__a,
-                                         (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a),
-                                         4, 1, 2, 3);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are not greater than or equal to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpnge_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is ordered with
-///    respect to the corresponding value in the second operand and returns the
-///    result of the comparison in the low-order bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDSS / CMPORDSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpord_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are ordered with respect to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDPS / CMPORDPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpord_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the value in the first operand is unordered
-///    with respect to the corresponding value in the second operand and
-///    returns the result of the comparison in the low-order bits of a vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDSS / CMPUNORDSS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float] containing one of the operands. The lower
-///    32 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [4 x float] containing the comparison results
-///    in the low-order bits.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpunord_ss(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares each of the corresponding 32-bit float values of the
-///    128-bit vectors of [4 x float] to determine if the values in the first
-///    operand are unordered with respect to those in the second operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDPS / CMPUNORDPS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cmpunord_ps(__m128 __a, __m128 __b)
-{
-  return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b);
-}
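-
-/* Example (editorial sketch): two values are "ordered" when neither is NaN,
-   so _mm_cmpord_ps(v, v) yields a mask that is true exactly where `v` is not
-   NaN.  Anding with that mask forces NaN lanes to +0.0.  `v` is a
-   hypothetical [4 x float] input:
-
-     __m128 not_nan = _mm_cmpord_ps(v, v);     // all ones where v is not NaN
-     __m128 cleaned = _mm_and_ps(not_nan, v);  // NaN lanes become +0.0
-*/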
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands for equality and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-///    two lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is less than the second
-///    operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is less than or equal to the
-///    second operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is greater than the second
-///    operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-///     two lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is greater than or equal to
-///    the second operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Compares two 32-bit float values in the low-order bits of both
-///    operands to determine if the first operand is not equal to the second
-///    operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the
-///     two lower 32-bit values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b);
-}
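-
-/* Example (editorial sketch): unlike the _mm_cmp*_ss intrinsics above, which
-   return a vector mask, the _mm_comi*_ss family returns a plain int, so the
-   result can drive ordinary control flow.  `a` and `b` are hypothetical
-   [4 x float] inputs and handle_less() is a hypothetical callback:
-
-     if (_mm_comilt_ss(a, b))
-       handle_less();   // lane 0 of a is less than lane 0 of b
-
-   Per the NaN rules documented above, a NaN in either lane 0 sends control
-   to the else path for every predicate except _mm_comineq_ss. */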
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine equality and returns
-///    the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    less than the second operand and returns the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    less than or equal to the second operand and returns the result of the
-///    comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    greater than the second operand and returns the result of the
-///    comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine if the first operand is
-///    greater than or equal to the second operand and returns the result of
-///    the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower 32-bit values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b);
-}
-
-/// Performs an unordered comparison of two 32-bit float values using
-///    the low-order bits of both operands to determine inequality and returns
-///    the result of the comparison.
-///
-///    If either of the two lower 32-bit values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the comparison.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower 32-bit values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_ss(__m128 __a, __m128 __b)
-{
-  return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b);
-}
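-
-/* Editorial note (sketch): the _mm_ucomi*_ss family returns the same values
-   as _mm_comi*_ss, including the NaN results documented above.  The
-   difference is exception behavior: COMISS signals an invalid-operation
-   exception for any NaN operand, while UCOMISS signals it only for a
-   signaling NaN, making the ucomi forms the usual choice when quiet NaNs are
-   expected data:
-
-     __m128 n = _mm_set_ss(__builtin_nanf(""));  // quiet NaN in lane 0
-     int r1 = _mm_ucomieq_ss(n, n);  // 0; no invalid-operation signal
-     int r2 = _mm_comieq_ss(n, n);   // 0, but signals invalid operation
-*/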
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtss_si32(__m128 __a)
-{
-  return __builtin_ia32_cvtss2si((__v4sf)__a);
-}
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvt_ss2si(__m128 __a)
-{
-  return _mm_cvtss_si32(__a);
-}
-
-#ifdef __x86_64__
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 64-bit integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 64-bit integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtss_si64(__m128 __a)
-{
-  return __builtin_ia32_cvtss2si64((__v4sf)__a);
-}
-
-#endif
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtps_pi32(__m128 __a)
-{
-  return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a);
-}
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvt_ps2pi(__m128 __a)
-{
-  return _mm_cvtps_pi32(__a);
-}
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer, truncating the result when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttss_si32(__m128 __a)
-{
-  return __builtin_ia32_cvttss2si((__v4sf)__a);
-}
-
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 32-bit integer, truncating the result when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 32-bit integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtt_ss2si(__m128 __a)
-{
-  return _mm_cvttss_si32(__a);
-}
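-
-/* Example (editorial sketch): the plain conversions round according to the
-   current rounding mode (round-to-nearest-even by default), while the
-   truncating forms always round toward zero:
-
-     __m128 v = _mm_set_ss(1.7f);
-     int a = _mm_cvtss_si32(v);    // 2 under the default rounding mode
-     int b = _mm_cvttss_si32(v);   // 1, truncated
-*/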
-
-#ifdef __x86_64__
-/// Converts a float value contained in the lower 32 bits of a vector of
-///    [4 x float] into a 64-bit integer, truncating the result when it is
-///    inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
-///   instructions.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the conversion.
-/// \returns A 64-bit integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttss_si64(__m128 __a)
-{
-  return __builtin_ia32_cvttss2si64((__v4sf)__a);
-}
-#endif
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32], truncating the result
-///    when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttps_pi32(__m128 __a)
-{
-  return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);
-}
-
-/// Converts two low-order float values in a 128-bit vector of
-///    [4 x float] into a 64-bit vector of [2 x i32], truncating the result
-///    when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPS2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtt_ps2pi(__m128 __a)
-{
-  return _mm_cvttps_pi32(__a);
-}
-
-/// Converts a 32-bit signed integer value into a floating-point value
-///    and writes it to the lower 32 bits of the destination. The remaining
-///    higher order elements of the destination vector are copied from the
-///    corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 32-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    converted value of the second operand. The upper 96 bits are copied from
-///    the upper 96 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsi32_ss(__m128 __a, int __b)
-{
-  __a[0] = __b;
-  return __a;
-}
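-
-/* Example (editorial sketch): only lane 0 is replaced; the other three lanes
-   pass through from the first operand:
-
-     __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes: 1, 2, 3, 4
-     __m128 r = _mm_cvtsi32_ss(v, 7);                // lanes: 7.0, 2, 3, 4
-*/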
-
-/// Converts a 32-bit signed integer value into a floating-point value
-///    and writes it to the lower 32 bits of the destination. The remaining
-///    higher order elements of the destination are copied from the
-///    corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 32-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    converted value of the second operand. The upper 96 bits are copied from
-///    the upper 96 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvt_si2ss(__m128 __a, int __b)
-{
-  return _mm_cvtsi32_ss(__a, __b);
-}
-
-#ifdef __x86_64__
-
-/// Converts a 64-bit signed integer value into a floating-point value
-///    and writes it to the lower 32 bits of the destination. The remaining
-///    higher order elements of the destination are copied from the
-///    corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the
-///    converted value of the second operand. The upper 96 bits are copied from
-///    the upper 96 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsi64_ss(__m128 __a, long long __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-#endif
-
-/// Converts two elements of a 64-bit vector of [2 x i32] into two
-///    floating-point values and writes them to the lower 64 bits of the
-///    destination. The remaining higher order elements of the destination are
-///    copied from the corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 64-bit vector of [2 x i32]. The elements in this vector are converted
-///    and written to the corresponding low-order elements in the destination.
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted value of the second operand. The upper 64 bits are copied from
-///    the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_ps(__m128 __a, __m64 __b)
-{
-  return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b);
-}
-
-/// Converts two elements of a 64-bit vector of [2 x i32] into two
-///    floating-point values and writes them to the lower 64 bits of the
-///    destination. The remaining higher order elements of the destination are
-///    copied from the corresponding elements in the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \param __b
-///    A 64-bit vector of [2 x i32]. The elements in this vector are converted
-///    and written to the corresponding low-order elements in the destination.
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted value from the second operand. The upper 64 bits are copied
-///    from the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvt_pi2ps(__m128 __a, __m64 __b)
-{
-  return _mm_cvtpi32_ps(__a, __b);
-}
-
-/// Extracts a float value contained in the lower 32 bits of a vector of
-///    [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
-///    used in the extraction.
-/// \returns A 32-bit float containing the extracted value.
-static __inline__ float __DEFAULT_FN_ATTRS
-_mm_cvtss_f32(__m128 __a)
-{
-  return __a[0];
-}
-
-/// Loads two packed float values from the address \a __p into the
-///     high-order bits of a 128-bit vector of [4 x float]. The low-order bits
-///     are copied from the low-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPS / MOVHPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0]
-///    of the destination.
-/// \param __p
-///    A pointer to two packed float values. Bits [63:0] are written to bits
-///    [127:64] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the moved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadh_pi(__m128 __a, const __m64 *__p)
-{
-  typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_loadh_pi_struct {
-    __mm_loadh_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  __mm_loadh_pi_v2f32 __b = ((const struct __mm_loadh_pi_struct*)__p)->__u;
-  __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
-  return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
-}
-
-/// Loads two packed float values from the address \a __p into the
-///    low-order bits of a 128-bit vector of [4 x float]. The high-order bits
-///    are copied from the high-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. Bits [127:64] are written to bits
-///    [127:64] of the destination.
-/// \param __p
-///    A pointer to two packed float values. Bits [63:0] are written to bits
-///    [63:0] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the moved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadl_pi(__m128 __a, const __m64 *__p)
-{
-  typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_loadl_pi_struct {
-    __mm_loadl_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  __mm_loadl_pi_v2f32 __b = ((const struct __mm_loadl_pi_struct*)__p)->__u;
-  __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1);
-  return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
-}
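-
-/* Example (editorial sketch): _mm_loadl_pi and _mm_loadh_pi together can
-   assemble one [4 x float] vector from two separate 64-bit halves.  `lo` and
-   `hi` are hypothetical pointers to two packed floats each:
-
-     __m128 v = _mm_setzero_ps();
-     v = _mm_loadl_pi(v, (const __m64 *)lo);  // floats 0-1 into bits [63:0]
-     v = _mm_loadh_pi(v, (const __m64 *)hi);  // floats 2-3 into bits [127:64]
-*/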
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    32 bits of the vector are initialized with the single-precision
-///    floating-point value loaded from a specified memory location. The upper
-///    96 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location containing a single-precision
-///    floating-point value.
-/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
-///    lower 32 bits contain the value loaded from the memory location. The
-///    upper 96 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_load_ss(const float *__p)
-{
-  struct __mm_load_ss_struct {
-    float __u;
-  } __attribute__((__packed__, __may_alias__));
-  float __u = ((const struct __mm_load_ss_struct*)__p)->__u;
-  return __extension__ (__m128){ __u, 0, 0, 0 };
-}
-
-/// Loads a 32-bit float value and duplicates it to all four vector
-///    elements of a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBROADCASTSS / MOVSS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a float value to be loaded and duplicated.
-/// \returns A 128-bit vector of [4 x float] containing the loaded and
-///    duplicated values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_load1_ps(const float *__p)
-{
-  struct __mm_load1_ps_struct {
-    float __u;
-  } __attribute__((__packed__, __may_alias__));
-  float __u = ((const struct __mm_load1_ps_struct*)__p)->__u;
-  return __extension__ (__m128){ __u, __u, __u, __u };
-}
-
-#define _mm_load_ps1(p) _mm_load1_ps(p)
-
-/// Loads a 128-bit floating-point vector of [4 x float] from an aligned
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 128-bit aligned.
-/// \returns A 128-bit vector of [4 x float] containing the loaded values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_load_ps(const float *__p)
-{
-  return *(const __m128*)__p;
-}
-
-/// Loads a 128-bit floating-point vector of [4 x float] from an
-///    unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [4 x float] containing the loaded values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadu_ps(const float *__p)
-{
-  struct __loadu_ps {
-    __m128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __loadu_ps*)__p)->__v;
-}
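-
-/* Example (editorial sketch): _mm_load_ps requires a 16-byte-aligned address
-   and may fault otherwise, while _mm_loadu_ps accepts any address:
-
-     float buf[8] __attribute__((__aligned__(16)));  // hypothetical data
-     __m128 a = _mm_load_ps(buf);       // relies on the 16-byte alignment
-     __m128 b = _mm_loadu_ps(buf + 1);  // any address is acceptable
-*/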
-
-/// Loads four packed float values, in reverse order, from an aligned
-///    memory location to 32-bit elements in a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 128-bit aligned.
-/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded
-///    in reverse order.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_loadr_ps(const float *__p)
-{
-  __m128 __a = _mm_load_ps(__p);
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
-}
-
-/// Create a 128-bit vector of [4 x float] with undefined values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit vector of [4 x float] containing undefined values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_undefined_ps(void)
-{
-  return (__m128)__builtin_ia32_undef128();
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    32 bits of the vector are initialized with the specified single-precision
-///    floating-point value. The upper 96 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize the lower 32
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
-///    lower 32 bits contain the value provided in the source operand. The
-///    upper 96 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set_ss(float __w)
-{
-  return __extension__ (__m128){ __w, 0, 0, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float], with each
-///    of the four single-precision floating-point vector elements set to the
-///    specified single-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS / PERMILPS </c> instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set1_ps(float __w)
-{
-  return __extension__ (__m128){ __w, __w, __w, __w };
-}
-
-/* Microsoft specific. */
-/// Constructs a 128-bit floating-point vector of [4 x float], with each
-///    of the four single-precision floating-point vector elements set to the
-///    specified single-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPERMILPS / PERMILPS </c> instruction.
-///
-/// \param __w
-///    A single-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set_ps1(float __w)
-{
-    return _mm_set1_ps(__w);
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]
-///    initialized with the specified single-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __z
-///    A single-precision floating-point value used to initialize bits [127:96]
-///    of the result.
-/// \param __y
-///    A single-precision floating-point value used to initialize bits [95:64]
-///    of the result.
-/// \param __x
-///    A single-precision floating-point value used to initialize bits [63:32]
-///    of the result.
-/// \param __w
-///    A single-precision floating-point value used to initialize bits [31:0]
-///    of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_set_ps(float __z, float __y, float __x, float __w)
-{
-  return __extension__ (__m128){ __w, __x, __y, __z };
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float],
-///    initialized in reverse order with the specified 32-bit single-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __z
-///    A single-precision floating-point value used to initialize bits [31:0]
-///    of the result.
-/// \param __y
-///    A single-precision floating-point value used to initialize bits [63:32]
-///    of the result.
-/// \param __x
-///    A single-precision floating-point value used to initialize bits [95:64]
-///    of the result.
-/// \param __w
-///    A single-precision floating-point value used to initialize bits [127:96]
-///    of the result.
-/// \returns An initialized 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_setr_ps(float __z, float __y, float __x, float __w)
-{
-  return __extension__ (__m128){ __z, __y, __x, __w };
-}
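-
-/* Example (editorial sketch): _mm_set_ps takes its arguments from the
-   highest lane down, while _mm_setr_ps takes them in memory order, so these
-   two calls build the same vector:
-
-     __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   // bits [31:0] hold 1.0
-     __m128 b = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);  // bits [31:0] hold 1.0
-*/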
-
-/// Constructs a 128-bit floating-point vector of [4 x float] initialized
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit floating-point vector of [4 x float] with
-///    all elements set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_setzero_ps(void)
-{
-  return __extension__ (__m128){ 0, 0, 0, 0 };
-}
-
-/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPS / MOVHPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pi(__m64 *__p, __m128 __a)
-{
-  typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_storeh_pi_struct {
-    __mm_storeh_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 2, 3);
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
-///     memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the float values.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pi(__m64 *__p, __m128 __a)
-{
-  typedef float __mm_storel_pi_v2f32 __attribute__((__vector_size__(8)));
-  struct __mm_storel_pi_struct {
-    __mm_storel_pi_v2f32 __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 0, 1);
-}
-
-/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
-///     memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ss(float *__p, __m128 __a)
-{
-  struct __mm_store_ss_struct {
-    float __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_ss_struct*)__p)->__u = __a[0];
-}
-
-/// Stores a 128-bit vector of [4 x float] to an unaligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_ps(float *__p, __m128 __a)
-{
-  struct __storeu_ps {
-    __m128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__p)->__v = __a;
-}
-
-/// Stores a 128-bit vector of [4 x float] into an aligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 16-byte aligned.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ps(float *__p, __m128 __a)
-{
-  *(__m128*)__p = __a;
-}
-
-/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into
-///    four contiguous elements in an aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
-///    of the four contiguous elements pointed to by \a __p.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_ps(float *__p, __m128 __a)
-{
-  __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0);
-  _mm_store_ps(__p, __a);
-}
-
-/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into
-///    four contiguous elements in an aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location.
-/// \param __a
-///    A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
-///    of the four contiguous elements pointed by \a __p.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_ps1(float *__p, __m128 __a)
-{
-  _mm_store1_ps(__p, __a);
-}
-
-/// Stores float values from a 128-bit vector of [4 x float] to an
-///    aligned memory location in reverse order.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
-///    instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 128-bit aligned.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_ps(float *__p, __m128 __a)
-{
-  __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
-  _mm_store_ps(__p, __a);
-}
-
-#define _MM_HINT_ET0 7
-#define _MM_HINT_ET1 6
-#define _MM_HINT_T0  3
-#define _MM_HINT_T1  2
-#define _MM_HINT_T2  1
-#define _MM_HINT_NTA 0
-
-#ifndef _MSC_VER
-/* FIXME: We have to #define this because "sel" must be a constant integer, and
-   Sema doesn't do any form of constant propagation yet. */
-
-/// Loads one cache line of data from the specified address to a location
-///    closer to the processor.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// void _mm_prefetch(const void * a, const int sel);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
-///
-/// \param a
-///    A pointer to a memory location containing a cache line of data.
-/// \param sel
-///    A predefined integer constant specifying the type of prefetch
-///    operation: \n
-///    _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The
-///    PREFETCHNTA instruction will be generated. \n
-///    _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will
-///    be generated. \n
-///    _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will
-///    be generated. \n
-///    _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
-///    be generated.
-#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \
-                                                 ((sel) >> 2) & 1, (sel) & 0x3))
-#endif
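-
-/// Illustrative sketch (not part of the original header; the array name,
-/// element count, and stride are hypothetical): prefetching the next block
-/// of a sequential walk one iteration ahead of its use.
-/// \code
-///   for (int i = 0; i < n; i += 16) {
-///     _mm_prefetch((const char *)&data[i + 16], _MM_HINT_T0);
-///     process_block(&data[i]);
-///   }
-/// \endcode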
-
-/// Stores a 64-bit integer in the specified aligned memory location. To
-///    minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTQ </c> instruction.
-///
-/// \param __p
-///    A pointer to an aligned memory location used to store the register value.
-/// \param __a
-///    A 64-bit integer containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS_MMX
-_mm_stream_pi(__m64 *__p, __m64 __a)
-{
-  __builtin_ia32_movntq(__p, __a);
-}
-
-/// Moves packed float values from a 128-bit vector of [4 x float] to a
-///    128-bit aligned memory location. To minimize caching, the data is flagged
-///    as non-temporal (unlikely to be used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPS / MOVNTPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 128-bit aligned memory location that will receive the
-///    single-precision floating-point values.
-/// \param __a
-///    A 128-bit vector of [4 x float] containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_ps(float *__p, __m128 __a)
-{
-  __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
-}
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Forces strong memory ordering (serialization) between store
-///    instructions preceding this instruction and store instructions following
-///    this instruction, ensuring the system completes all previous stores
-///    before executing subsequent stores.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> SFENCE </c> instruction.
-///
-void _mm_sfence(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-
-/// Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
-///    returns it, as specified by the immediate integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// int _mm_extract_pi16(__m64 a, int n);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
-///
-/// \param a
-///    A 64-bit vector of [4 x i16].
-/// \param n
-///    An immediate integer operand that determines which bits are extracted: \n
-///    0: Bits [15:0] are copied to the destination. \n
-///    1: Bits [31:16] are copied to the destination. \n
-///    2: Bits [47:32] are copied to the destination. \n
-///    3: Bits [63:48] are copied to the destination.
-/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
-#define _mm_extract_pi16(a, n) \
-  (int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n)
-
-/// Copies data from the 64-bit vector of [4 x i16] to the destination,
-///    and inserts the lower 16 bits of an integer operand at the 16-bit offset
-///    specified by the immediate operand \a n.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m64 _mm_insert_pi16(__m64 a, int d, int n);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> PINSRW </c> instruction.
-///
-/// \param a
-///    A 64-bit vector of [4 x i16].
-/// \param d
-///    An integer. The lower 16-bit value from this operand is written to the
-///    destination at the offset specified by operand \a n.
-/// \param n
-///    An immediate integer operand that determines which bits in the
-///    destination are written. \n
-///    0: Bits [15:0] are copied to the destination. \n
-///    1: Bits [31:16] are copied to the destination. \n
-///    2: Bits [47:32] are copied to the destination. \n
-///    3: Bits [63:48] are copied to the destination.  \n
-///    The remaining bits in the destination are copied from the corresponding
-///    bits in operand \a a.
-/// \returns A 64-bit integer vector containing the copied packed data from the
-///    operands.
-#define _mm_insert_pi16(a, d, n) \
-  (__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n)
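-
-/// Illustrative sketch (not part of the original header): round-tripping a
-/// 16-bit element with _mm_extract_pi16() and _mm_insert_pi16(). Note that
-/// element 0 of _mm_set_pi16() is its last argument.
-/// \code
-///   __m64 v = _mm_set_pi16(4, 3, 2, 1); // elements {1, 2, 3, 4}
-///   int e2 = _mm_extract_pi16(v, 2);    // e2 == 3
-///   v = _mm_insert_pi16(v, 99, 0);      // elements {99, 2, 3, 4}
-/// \endcode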
-
-/// Compares each of the corresponding packed 16-bit integer values of
-///    the 64-bit integer vectors, and writes the greater value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMAXSW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_max_pi16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Compares each of the corresponding packed 8-bit unsigned integer
-///    values of the 64-bit integer vectors, and writes the greater value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMAXUB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_max_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
-}
-
-/// Compares each of the corresponding packed 16-bit integer values of
-///    the 64-bit integer vectors, and writes the lesser value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMINSW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_min_pi16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Compares each of the corresponding packed 8-bit unsigned integer
-///    values of the 64-bit integer vectors, and writes the lesser value to the
-///    corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMINUB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_min_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
-}
-
-/// Takes the most significant bit from each 8-bit element in a 64-bit
-///    integer vector to create an 8-bit mask value. Zero-extends the value to
-///    a 32-bit integer and writes it to the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMOVMSKB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bit from each 8-bit element in \a __a,
-///    written to bits [7:0].
-static __inline__ int __DEFAULT_FN_ATTRS_MMX
-_mm_movemask_pi8(__m64 __a)
-{
-  return __builtin_ia32_pmovmskb((__v8qi)__a);
-}
-
-/// Multiplies packed 16-bit unsigned integer values and writes the
-///    high-order 16 bits of each 32-bit product to the corresponding bits in
-///    the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMULHUW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mulhi_pu16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the
-///    destination, as specified by the immediate value operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m64 _mm_shuffle_pi16(__m64 a, const int n);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> PSHUFW </c> instruction.
-///
-/// \param a
-///    A 64-bit integer vector containing the values to be shuffled.
-/// \param n
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from \a a. The destinations within the 64-bit destination are
-///    assigned values as follows: \n
-///    Bits [1:0] are used to assign values to bits [15:0] in the
-///    destination. \n
-///    Bits [3:2] are used to assign values to bits [31:16] in the
-///    destination. \n
-///    Bits [5:4] are used to assign values to bits [47:32] in the
-///    destination. \n
-///    Bits [7:6] are used to assign values to bits [63:48] in the
-///    destination. \n
-///    Bit value assignments: \n
-///    00: assigned from bits [15:0] of \a a. \n
-///    01: assigned from bits [31:16] of \a a. \n
-///    10: assigned from bits [47:32] of \a a. \n
-///    11: assigned from bits [63:48] of \a a.
-/// \returns A 64-bit integer vector containing the shuffled values.
-#define _mm_shuffle_pi16(a, n) \
-  (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n))
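-
-/// Illustrative sketch (not part of the original header): reversing the four
-/// 16-bit elements. The mask 0x1B encodes the source indices 3, 2, 1, 0 for
-/// destination positions 0 through 3.
-/// \code
-///   __m64 v = _mm_set_pi16(4, 3, 2, 1);  // elements {1, 2, 3, 4}
-///   __m64 r = _mm_shuffle_pi16(v, 0x1B); // elements {4, 3, 2, 1}
-/// \endcode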
-
-/// Conditionally copies the values from each 8-bit element in the first
-///    64-bit integer vector operand to the specified memory location, as
-///    specified by the most significant bit in the corresponding element in the
-///    second 64-bit integer vector operand.
-///
-///    To minimize caching, the data is flagged as non-temporal
-///    (unlikely to be used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MASKMOVQ </c> instruction.
-///
-/// \param __d
-///    A 64-bit integer vector containing the values with elements to be copied.
-/// \param __n
-///    A 64-bit integer vector operand. The most significant bit from each 8-bit
-///    element determines whether the corresponding element in operand \a __d
-///    is copied. If the most significant bit of a given element is 1, the
-///    corresponding element in operand \a __d is copied.
-/// \param __p
-///    A pointer to a 64-bit memory location that will receive the conditionally
-///    copied integer values. The address of the memory location does not have
-///    to be aligned.
-static __inline__ void __DEFAULT_FN_ATTRS_MMX
-_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
-{
-  __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
-}
-
-/// Computes the rounded averages of the packed unsigned 8-bit integer
-///    values and writes the averages to the corresponding bits in the
-///    destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAVGB </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the averages of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_avg_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
-}
-
-/// Computes the rounded averages of the packed unsigned 16-bit integer
-///    values and writes the averages to the corresponding bits in the
-///    destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAVGW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector containing the averages of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_avg_pu16(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
-}
-
-/// Subtracts the corresponding 8-bit unsigned integer values of the two
-///    64-bit vector operands and computes the absolute value of each
-///    difference. The sum of the eight absolute differences is then written to
-///    bits [15:0] of the destination; the remaining bits [63:16] are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PSADBW </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 64-bit integer vector containing one of the source operands.
-/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the
-///    sets of absolute differences between both operands. The upper bits are
-///    cleared.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sad_pu8(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
-}
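-
-/// Illustrative worked example (not part of the original header): only two
-/// of the eight byte lanes are nonzero here, so the sum is easy to follow.
-/// \code
-///   __m64 a = _mm_set_pi8(0, 0, 0, 0, 0, 0, 10, 1);
-///   __m64 b = _mm_set_pi8(0, 0, 0, 0, 0, 0,  7, 5);
-///   __m64 r = _mm_sad_pu8(a, b); // |1-5| + |10-7| == 7, in bits [15:0]
-/// \endcode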
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Returns the contents of the MXCSR register as a 32-bit unsigned
-///    integer value.
-///
-///    There are several groups of macros associated with this
-///    intrinsic, including:
-///    <ul>
-///    <li>
-///      For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
-///      _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
-///      _MM_EXCEPT_INEXACT. There is a convenience wrapper
-///      _MM_GET_EXCEPTION_STATE().
-///    </li>
-///    <li>
-///      For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
-///      _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
-///      There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
-///    </li>
-///    <li>
-///      For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
-///      _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
-///      _MM_GET_ROUNDING_MODE().
-///    </li>
-///    <li>
-///      For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
-///      There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
-///    </li>
-///    <li>
-///      For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
-///      _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
-///      _MM_GET_DENORMALS_ZERO_MODE().
-///    </li>
-///    </ul>
-///
-///    For example, the following expression checks if an overflow exception has
-///    occurred:
-///    \code
-///      ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )
-///    \endcode
-///
-///    The following expression gets the current rounding mode:
-///    \code
-///      _MM_GET_ROUNDING_MODE()
-///    \endcode
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSTMXCSR / STMXCSR </c> instruction.
-///
-/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
-///    register.
-unsigned int _mm_getcsr(void);
-
-/// Sets the MXCSR register with the 32-bit unsigned integer value.
-///
-///    There are several groups of macros associated with this intrinsic,
-///    including:
-///    <ul>
-///    <li>
-///      For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
-///      _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
-///      _MM_EXCEPT_INEXACT. There is a convenience wrapper
-///      _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
-///    </li>
-///    <li>
-///      For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
-///      _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
-///      There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
-///      of these macros.
-///    </li>
-///    <li>
-///      For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
-///      _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
-///      _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
-///    </li>
-///    <li>
-///      For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
-///      There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
-///      one of these macros.
-///    </li>
-///    <li>
-///      For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
-///      _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
-///      _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
-///    </li>
-///    </ul>
-///
-///    For example, the following expression causes subsequent floating-point
-///    operations to round up:
-///    \code
-///      _MM_SET_ROUNDING_MODE(_MM_ROUND_UP)
-///    \endcode
-///
-///    The following example sets the DAZ and FTZ flags:
-///    \code
-///    void setFlags() {
-///      _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
-///      _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
-///    }
-///    \endcode
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VLDMXCSR / LDMXCSR </c> instruction.
-///
-/// \param __i
-///    A 32-bit unsigned integer value to be written to the MXCSR register.
-void _mm_setcsr(unsigned int __i);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-
-/// Selects 4 float values from the 128-bit operands of [4 x float], as
-///    specified by the immediate value operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPS / SHUFPS </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [4 x float].
-/// \param b
-///    A 128-bit vector of [4 x float].
-/// \param mask
-///    An immediate value containing an 8-bit value specifying which elements to
-///    copy from \a a and \a b. \n
-///    Bits [3:0] specify the values copied from operand \a a. \n
-///    Bits [7:4] specify the values copied from operand \a b. \n
-///    The destinations within the 128-bit destination are assigned values as
-///    follows: \n
-///    Bits [1:0] are used to assign values to bits [31:0] in the
-///    destination. \n
-///    Bits [3:2] are used to assign values to bits [63:32] in the
-///    destination. \n
-///    Bits [5:4] are used to assign values to bits [95:64] in the
-///    destination. \n
-///    Bits [7:6] are used to assign values to bits [127:96] in the
-///    destination. \n
-///    Bit value assignments: \n
-///    00: Bits [31:0] copied from the specified operand. \n
-///    01: Bits [63:32] copied from the specified operand. \n
-///    10: Bits [95:64] copied from the specified operand. \n
-///    11: Bits [127:96] copied from the specified operand.
-/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
-#define _mm_shuffle_ps(a, b, mask) \
-  (__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
-                                (int)(mask))
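-
-/// Illustrative sketch (not part of the original header): selecting the
-/// lower two lanes from \a a and the upper two from \a b, using the
-/// _MM_SHUFFLE() helper defined later in this header.
-/// \code
-///   __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); // a[i] == i
-///   __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f); // b[i] == i + 4
-///   __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 0, 1));
-///   // r == {1.0f, 0.0f, 6.0f, 7.0f}
-/// \endcode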
-
-/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-///    [4 x float] and interleaves them into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPS / UNPCKHPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [95:64] are written to bits [31:0] of the destination. \n
-///    Bits [127:96] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [95:64] are written to bits [63:32] of the destination. \n
-///    Bits [127:96] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_unpackhi_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7);
-}
-
-/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-///    [4 x float] and interleaves them into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPS / UNPCKLPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [31:0] are written to bits [31:0] of the destination.  \n
-///    Bits [63:32] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x float]. \n
-///    Bits [31:0] are written to bits [63:32] of the destination. \n
-///    Bits [63:32] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_unpacklo_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5);
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    32 bits are set to the lower 32 bits of the second parameter. The upper
-///    96 bits are set to the upper 96 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDPS / BLENDPS / MOVSS </c>
-///    instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float]. The upper 96 bits are
-///    written to the upper 96 bits of the result.
-/// \param __b
-///    A 128-bit floating-point vector of [4 x float]. The lower 32 bits are
-///    written to the lower 32 bits of the result.
-/// \returns A 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_move_ss(__m128 __a, __m128 __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    64 bits are set to the upper 64 bits of the second parameter. The upper
-///    64 bits are set to the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
-///    written to the upper 64 bits of the result.
-/// \param __b
-///    A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
-///    written to the lower 64 bits of the result.
-/// \returns A 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_movehl_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3);
-}
-
-/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
-///    64 bits are set to the lower 64 bits of the first parameter. The upper
-///    64 bits are set to the lower 64 bits of the second parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
-///    written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
-///    written to the upper 64 bits of the result.
-/// \returns A 128-bit floating-point vector of [4 x float].
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_movelh_ps(__m128 __a, __m128 __b)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5);
-}
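-
-/// Illustrative sketch (not part of the original header): how
-/// _mm_movelh_ps() and _mm_movehl_ps() pair up the 64-bit halves of two
-/// vectors.
-/// \code
-///   __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); // a[i] == i
-///   __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f); // b[i] == i + 4
-///   __m128 lo = _mm_movelh_ps(a, b); // {0, 1, 4, 5}
-///   __m128 hi = _mm_movehl_ps(a, b); // {6, 7, 2, 3}
-/// \endcode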
-
-/// Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x
-///    float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [4 x i16]. The elements of the destination are copied
-///    from the corresponding elements in this operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi16_ps(__m64 __a)
-{
-  __m64 __b, __c;
-  __m128 __r;
-
-  __b = _mm_setzero_si64();
-  __b = _mm_cmpgt_pi16(__b, __a);
-  __c = _mm_unpackhi_pi16(__a, __b);
-  __r = _mm_setzero_ps();
-  __r = _mm_cvtpi32_ps(__r, __c);
-  __r = _mm_movelh_ps(__r, __r);
-  __c = _mm_unpacklo_pi16(__a, __b);
-  __r = _mm_cvtpi32_ps(__r, __c);
-
-  return __r;
-}
-
-/// Converts a 64-bit vector of 16-bit unsigned integer values into a
-///    128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of 16-bit unsigned integer values. The elements of the
-///    destination are copied from the corresponding elements in this operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpu16_ps(__m64 __a)
-{
-  __m64 __b, __c;
-  __m128 __r;
-
-  __b = _mm_setzero_si64();
-  __c = _mm_unpackhi_pi16(__a, __b);
-  __r = _mm_setzero_ps();
-  __r = _mm_cvtpi32_ps(__r, __c);
-  __r = _mm_movelh_ps(__r, __r);
-  __c = _mm_unpacklo_pi16(__a, __b);
-  __r = _mm_cvtpi32_ps(__r, __c);
-
-  return __r;
-}
-
-/// Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]
-///    into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [8 x i8]. The elements of the destination are copied
-///    from the corresponding lower 4 elements in this operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi8_ps(__m64 __a)
-{
-  __m64 __b;
-
-  __b = _mm_setzero_si64();
-  __b = _mm_cmpgt_pi8(__b, __a);
-  __b = _mm_unpacklo_pi8(__a, __b);
-
-  return _mm_cvtpi16_ps(__b);
-}
-
-/// Converts the lower four unsigned 8-bit integer values from a 64-bit
-///    vector of [8 x u8] into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of unsigned 8-bit integer values. The elements of the
-///    destination are copied from the corresponding lower 4 elements in this
-///    operand.
-/// \returns A 128-bit vector of [4 x float] containing the copied and converted
-///    values from the source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpu8_ps(__m64 __a)
-{
-  __m64 __b;
-
-  __b = _mm_setzero_si64();
-  __b = _mm_unpacklo_pi8(__a, __b);
-
-  return _mm_cvtpi16_ps(__b);
-}
-
-/// Converts the two 32-bit signed integer values from each 64-bit vector
-///    operand of [2 x i32] into a 128-bit vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [2 x i32]. The lower elements of the destination are
-///    copied from the elements in this operand.
-/// \param __b
-///    A 64-bit vector of [2 x i32]. The upper elements of the destination are
-///    copied from the elements in this operand.
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    copied and converted values from the first operand. The upper 64 bits
-///    contain the copied and converted values from the second operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
-{
-  __m128 __c;
-
-  __c = _mm_setzero_ps();
-  __c = _mm_cvtpi32_ps(__c, __b);
-  __c = _mm_movelh_ps(__c, __c);
-
-  return _mm_cvtpi32_ps(__c, __a);
-}
-
-/// Converts each single-precision floating-point element of a 128-bit
-///    floating-point vector of [4 x float] into a 16-bit signed integer, and
-///    packs the results into a 64-bit integer vector of [4 x i16].
-///
-///    If the floating-point element is NaN or infinity, or if the
-///    floating-point element is greater than 0x7FFFFFFF or less than -0x8000,
-///    it is converted to 0x8000. Otherwise if the floating-point element is
-///    greater than 0x7FFF, it is converted to 0x7FFF.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 64-bit integer vector of [4 x i16] containing the converted
-///    values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtps_pi16(__m128 __a)
-{
-  __m64 __b, __c;
-
-  __b = _mm_cvtps_pi32(__a);
-  __a = _mm_movehl_ps(__a, __a);
-  __c = _mm_cvtps_pi32(__a);
-
-  return _mm_packs_pi32(__b, __c);
-}
-
-/// Converts each single-precision floating-point element of a 128-bit
-///    floating-point vector of [4 x float] into an 8-bit signed integer, and
-///    packs the results into the lower 32 bits of a 64-bit integer vector of
-///    [8 x i8]. The upper 32 bits of the vector are set to 0.
-///
-///    If the floating-point element is NaN or infinity, or if the
-///    floating-point element is greater than 0x7FFFFFFF or less than -0x80, it
-///    is converted to 0x80. Otherwise if the floating-point element is greater
-///    than 0x7F, it is converted to 0x7F.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPS2PI + COMPOSITE </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the
-///    converted values and the upper 32 bits are set to zero.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtps_pi8(__m128 __a)
-{
-  __m64 __b, __c;
-
-  __b = _mm_cvtps_pi16(__a);
-  __c = _mm_setzero_si64();
-
-  return _mm_packs_pi16(__b, __c);
-}
-
-/// Extracts the sign bits from each single-precision floating-point
-///    element of a 128-bit floating-point vector of [4 x float] and returns the
-///    sign bits in bits [3:0] of the result. Bits [31:4] of the result are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPS / MOVMSKPS </c> instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
-///    single-precision floating-point element of the parameter. Bits [31:4] are
-///    set to zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_ps(__m128 __a)
-{
-  return __builtin_ia32_movmskps((__v4sf)__a);
-}
-
-
-#define _MM_ALIGN16 __attribute__((aligned(16)))
-
-#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
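-
-/* For illustration (not part of the original header): _MM_SHUFFLE packs four
-   2-bit element indices, highest destination lane first. For example,
-   _MM_SHUFFLE(3, 2, 1, 0) == (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0xE4,
-   the identity selector for _mm_shuffle_pi16(), and for _mm_shuffle_ps()
-   when both operands are the same vector. */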
-
-#define _MM_EXCEPT_INVALID    (0x0001)
-#define _MM_EXCEPT_DENORM     (0x0002)
-#define _MM_EXCEPT_DIV_ZERO   (0x0004)
-#define _MM_EXCEPT_OVERFLOW   (0x0008)
-#define _MM_EXCEPT_UNDERFLOW  (0x0010)
-#define _MM_EXCEPT_INEXACT    (0x0020)
-#define _MM_EXCEPT_MASK       (0x003f)
-
-#define _MM_MASK_INVALID      (0x0080)
-#define _MM_MASK_DENORM       (0x0100)
-#define _MM_MASK_DIV_ZERO     (0x0200)
-#define _MM_MASK_OVERFLOW     (0x0400)
-#define _MM_MASK_UNDERFLOW    (0x0800)
-#define _MM_MASK_INEXACT      (0x1000)
-#define _MM_MASK_MASK         (0x1f80)
-
-#define _MM_ROUND_NEAREST     (0x0000)
-#define _MM_ROUND_DOWN        (0x2000)
-#define _MM_ROUND_UP          (0x4000)
-#define _MM_ROUND_TOWARD_ZERO (0x6000)
-#define _MM_ROUND_MASK        (0x6000)
-
-#define _MM_FLUSH_ZERO_MASK   (0x8000)
-#define _MM_FLUSH_ZERO_ON     (0x8000)
-#define _MM_FLUSH_ZERO_OFF    (0x0000)
-
-#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
-#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
-#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
-#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
-
-#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
-#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
-#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
-#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
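-
-/// Illustrative sketch (not part of the original header): temporarily
-/// forcing round-toward-zero, then restoring the caller's MXCSR state.
-/// \code
-///   unsigned int saved = _mm_getcsr();
-///   _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
-///   /* ... rounding-sensitive work ... */
-///   _mm_setcsr(saved);
-/// \endcode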
-
-#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
-do { \
-  __m128 tmp3, tmp2, tmp1, tmp0; \
-  tmp0 = _mm_unpacklo_ps((row0), (row1)); \
-  tmp2 = _mm_unpacklo_ps((row2), (row3)); \
-  tmp1 = _mm_unpackhi_ps((row0), (row1)); \
-  tmp3 = _mm_unpackhi_ps((row2), (row3)); \
-  (row0) = _mm_movelh_ps(tmp0, tmp2); \
-  (row1) = _mm_movehl_ps(tmp2, tmp0); \
-  (row2) = _mm_movelh_ps(tmp1, tmp3); \
-  (row3) = _mm_movehl_ps(tmp3, tmp1); \
-} while (0)
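-
-/// Illustrative sketch (not part of the original header; the matrix name is
-/// hypothetical): transposing a row-major 4x4 float matrix in registers.
-/// \code
-///   float m[4][4]; /* ... filled in ... */
-///   __m128 r0 = _mm_loadu_ps(m[0]);
-///   __m128 r1 = _mm_loadu_ps(m[1]);
-///   __m128 r2 = _mm_loadu_ps(m[2]);
-///   __m128 r3 = _mm_loadu_ps(m[3]);
-///   _MM_TRANSPOSE4_PS(r0, r1, r2, r3); /* r0..r3 now hold the columns */
-/// \endcode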
-
-/* Aliases for compatibility. */
-#define _m_pextrw _mm_extract_pi16
-#define _m_pinsrw _mm_insert_pi16
-#define _m_pmaxsw _mm_max_pi16
-#define _m_pmaxub _mm_max_pu8
-#define _m_pminsw _mm_min_pi16
-#define _m_pminub _mm_min_pu8
-#define _m_pmovmskb _mm_movemask_pi8
-#define _m_pmulhuw _mm_mulhi_pu16
-#define _m_pshufw _mm_shuffle_pi16
-#define _m_maskmovq _mm_maskmove_si64
-#define _m_pavgb _mm_avg_pu8
-#define _m_pavgw _mm_avg_pu16
-#define _m_psadbw _mm_sad_pu8
-#define _m_ _mm_
-
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_MMX
-
-/* Ugly hack for backwards-compatibility (compatible with gcc) */
-#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
-#include <emmintrin.h>
-#endif
-
-#endif /* __XMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_builtin_vars.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/__clang_cuda_builtin_vars.h
rename to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
similarity index 94%
copy from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h
copy to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
index 834a2e3..8ba1826 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_cmath.h
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
@@ -12,7 +12,9 @@
 #error "This file is for CUDA compilation only."
 #endif
 
+#ifndef __OPENMP_NVPTX__
 #include <limits>
+#endif
 
 // CUDA lets us use various std math functions on the device side.  This file
 // works in concert with __clang_cuda_math_forward_declares.h to make this work.
@@ -30,32 +32,16 @@
 // implementation.  Declaring in the global namespace and pulling into namespace
 // std covers all of the known knowns.
 
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
 #else
 #define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
 #endif
 
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
 __DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
 __DEVICE__ long abs(long __n) { return ::labs(__n); }
 __DEVICE__ float abs(float __x) { return ::fabsf(__x); }
 __DEVICE__ double abs(double __x) { return ::fabs(__x); }
-#endif
-// TODO: remove once variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const float abs(const float __x) { return ::fabsf((float)__x); }
-__DEVICE__ const double abs(const double __x) { return ::fabs((double)__x); }
-#endif
 __DEVICE__ float acos(float __x) { return ::acosf(__x); }
 __DEVICE__ float asin(float __x) { return ::asinf(__x); }
 __DEVICE__ float atan(float __x) { return ::atanf(__x); }
@@ -64,11 +50,9 @@
 __DEVICE__ float cos(float __x) { return ::cosf(__x); }
 __DEVICE__ float cosh(float __x) { return ::coshf(__x); }
 __DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) __NOEXCEPT { return ::fabsf(__x); }
+__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
 __DEVICE__ float floor(float __x) { return ::floorf(__x); }
 __DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
-// TODO: remove when variant is supported
-#ifndef _OPENMP
 __DEVICE__ int fpclassify(float __x) {
   return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
                               FP_ZERO, __x);
@@ -77,14 +61,15 @@
   return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
                               FP_ZERO, __x);
 }
-#endif
 __DEVICE__ float frexp(float __arg, int *__exp) {
   return ::frexpf(__arg, __exp);
 }
 
 // For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows.
-#ifndef _MSC_VER
+// Windows. For OpenMP we omit these as some old system headers have
+// non-conforming `isinf(float)` and `isnan(float)` implementations that return
+// an `int`. The system versions of these functions should be fine anyway.
+#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
 __DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
 __DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
 __DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -161,6 +146,8 @@
 // libdevice doesn't provide an implementation, and we don't want to be in the
 // business of implementing tricky libm functions in this header.
 
+#ifndef __OPENMP_NVPTX__
+
 // Now we've defined everything we promised we'd define in
 // __clang_cuda_math_forward_declares.h.  We need to do two additional things to
 // fix up our math functions.
@@ -457,10 +444,7 @@
 using ::remquof;
 using ::rintf;
 using ::roundf;
-// TODO: remove once variant is supported
-#ifndef _OPENMP
 using ::scalblnf;
-#endif
 using ::scalbnf;
 using ::sinf;
 using ::sinhf;
@@ -479,7 +463,8 @@
 } // namespace std
 #endif
 
-#undef __NOEXCEPT
+#endif // __OPENMP_NVPTX__
+
 #undef __DEVICE__
 
 #endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h
new file mode 100644
index 0000000..8c10ff6
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h
@@ -0,0 +1,259 @@
+/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_COMPLEX_BUILTINS
+#define __CLANG_CUDA_COMPLEX_BUILTINS
+
+// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3.  These are
+// libgcc functions that clang assumes are available when compiling c99 complex
+// operations.  (These implementations come from libc++, and have been modified
+// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
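+//
+// Illustrative sketch (not part of the original comment; the function name
+// `mul' is hypothetical): a C99 complex multiplication like the one below is
+// what makes clang emit a call to __muldc3.
+//
+//   double _Complex mul(double _Complex x, double _Complex y) {
+//     return x * y; // may lower to a __muldc3 call taking the real and
+//                   // imaginary parts of x and y as arguments
+//   }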
+
+#pragma push_macro("__DEVICE__")
+#ifdef _OPENMP
+#pragma omp declare target
+#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
+#else
+#define __DEVICE__ __device__ inline
+#endif
+
+// To make the algorithms available for C and C++ in CUDA and OpenMP we select
+// different but equivalent function versions. TODO: For OpenMP we currently
+// select the native builtins as the overload support for templates is lacking.
+#if !defined(_OPENMP)
+#define _ISNANd std::isnan
+#define _ISNANf std::isnan
+#define _ISINFd std::isinf
+#define _ISINFf std::isinf
+#define _ISFINITEd std::isfinite
+#define _ISFINITEf std::isfinite
+#define _COPYSIGNd std::copysign
+#define _COPYSIGNf std::copysign
+#define _SCALBNd std::scalbn
+#define _SCALBNf std::scalbn
+#define _ABSd std::abs
+#define _ABSf std::abs
+#define _LOGBd std::logb
+#define _LOGBf std::logb
+#else
+#define _ISNANd __nv_isnand
+#define _ISNANf __nv_isnanf
+#define _ISINFd __nv_isinfd
+#define _ISINFf __nv_isinff
+#define _ISFINITEd __nv_isfinited
+#define _ISFINITEf __nv_finitef
+#define _COPYSIGNd __nv_copysign
+#define _COPYSIGNf __nv_copysignf
+#define _SCALBNd __nv_scalbn
+#define _SCALBNf __nv_scalbnf
+#define _ABSd __nv_fabs
+#define _ABSf __nv_fabsf
+#define _LOGBd __nv_logb
+#define _LOGBf __nv_logbf
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+__DEVICE__ double _Complex __muldc3(double __a, double __b, double __c,
+                                    double __d) {
+  double __ac = __a * __c;
+  double __bd = __b * __d;
+  double __ad = __a * __d;
+  double __bc = __b * __c;
+  double _Complex z;
+  __real__(z) = __ac - __bd;
+  __imag__(z) = __ad + __bc;
+  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+    int __recalc = 0;
+    if (_ISINFd(__a) || _ISINFd(__b)) {
+      __a = _COPYSIGNd(_ISINFd(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNd(_ISINFd(__b) ? 1 : 0, __b);
+      if (_ISNANd(__c))
+        __c = _COPYSIGNd(0, __c);
+      if (_ISNANd(__d))
+        __d = _COPYSIGNd(0, __d);
+      __recalc = 1;
+    }
+    if (_ISINFd(__c) || _ISINFd(__d)) {
+      __c = _COPYSIGNd(_ISINFd(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNd(_ISINFd(__d) ? 1 : 0, __d);
+      if (_ISNANd(__a))
+        __a = _COPYSIGNd(0, __a);
+      if (_ISNANd(__b))
+        __b = _COPYSIGNd(0, __b);
+      __recalc = 1;
+    }
+    if (!__recalc &&
+        (_ISINFd(__ac) || _ISINFd(__bd) || _ISINFd(__ad) || _ISINFd(__bc))) {
+      if (_ISNANd(__a))
+        __a = _COPYSIGNd(0, __a);
+      if (_ISNANd(__b))
+        __b = _COPYSIGNd(0, __b);
+      if (_ISNANd(__c))
+        __c = _COPYSIGNd(0, __c);
+      if (_ISNANd(__d))
+        __d = _COPYSIGNd(0, __d);
+      __recalc = 1;
+    }
+    if (__recalc) {
+      // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
+      // a device overload (and isn't constexpr before C++11, naturally).
+      __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
+      __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ float _Complex __mulsc3(float __a, float __b, float __c, float __d) {
+  float __ac = __a * __c;
+  float __bd = __b * __d;
+  float __ad = __a * __d;
+  float __bc = __b * __c;
+  float _Complex z;
+  __real__(z) = __ac - __bd;
+  __imag__(z) = __ad + __bc;
+  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+    int __recalc = 0;
+    if (_ISINFf(__a) || _ISINFf(__b)) {
+      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+      if (_ISNANf(__c))
+        __c = _COPYSIGNf(0, __c);
+      if (_ISNANf(__d))
+        __d = _COPYSIGNf(0, __d);
+      __recalc = 1;
+    }
+    if (_ISINFf(__c) || _ISINFf(__d)) {
+      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+      if (_ISNANf(__a))
+        __a = _COPYSIGNf(0, __a);
+      if (_ISNANf(__b))
+        __b = _COPYSIGNf(0, __b);
+      __recalc = 1;
+    }
+    if (!__recalc &&
+        (_ISINFf(__ac) || _ISINFf(__bd) || _ISINFf(__ad) || _ISINFf(__bc))) {
+      if (_ISNANf(__a))
+        __a = _COPYSIGNf(0, __a);
+      if (_ISNANf(__b))
+        __b = _COPYSIGNf(0, __b);
+      if (_ISNANf(__c))
+        __c = _COPYSIGNf(0, __c);
+      if (_ISNANf(__d))
+        __d = _COPYSIGNf(0, __d);
+      __recalc = 1;
+    }
+    if (__recalc) {
+      __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
+      __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ double _Complex __divdc3(double __a, double __b, double __c,
+                                    double __d) {
+  int __ilogbw = 0;
+  // Can't use std::max, because that's defined in <algorithm>, and we don't
+  // want to pull that in for every compile.  The CUDA headers define
+  // ::max(float, float) and ::max(double, double), which is sufficient for us.
+  double __logbw = _LOGBd(max(_ABSd(__c), _ABSd(__d)));
+  if (_ISFINITEd(__logbw)) {
+    __ilogbw = (int)__logbw;
+    __c = _SCALBNd(__c, -__ilogbw);
+    __d = _SCALBNd(__d, -__ilogbw);
+  }
+  double __denom = __c * __c + __d * __d;
+  double _Complex z;
+  __real__(z) = _SCALBNd((__a * __c + __b * __d) / __denom, -__ilogbw);
+  __imag__(z) = _SCALBNd((__b * __c - __a * __d) / __denom, -__ilogbw);
+  if (_ISNANd(__real__(z)) && _ISNANd(__imag__(z))) {
+    if ((__denom == 0.0) && (!_ISNANd(__a) || !_ISNANd(__b))) {
+      __real__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __a;
+      __imag__(z) = _COPYSIGNd(__builtin_huge_val(), __c) * __b;
+    } else if ((_ISINFd(__a) || _ISINFd(__b)) && _ISFINITEd(__c) &&
+               _ISFINITEd(__d)) {
+      __a = _COPYSIGNd(_ISINFd(__a) ? 1.0 : 0.0, __a);
+      __b = _COPYSIGNd(_ISINFd(__b) ? 1.0 : 0.0, __b);
+      __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
+      __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
+    } else if (_ISINFd(__logbw) && __logbw > 0.0 && _ISFINITEd(__a) &&
+               _ISFINITEd(__b)) {
+      __c = _COPYSIGNd(_ISINFd(__c) ? 1.0 : 0.0, __c);
+      __d = _COPYSIGNd(_ISINFd(__d) ? 1.0 : 0.0, __d);
+      __real__(z) = 0.0 * (__a * __c + __b * __d);
+      __imag__(z) = 0.0 * (__b * __c - __a * __d);
+    }
+  }
+  return z;
+}
+
+__DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
+  int __ilogbw = 0;
+  float __logbw = _LOGBf(max(_ABSf(__c), _ABSf(__d)));
+  if (_ISFINITEf(__logbw)) {
+    __ilogbw = (int)__logbw;
+    __c = _SCALBNf(__c, -__ilogbw);
+    __d = _SCALBNf(__d, -__ilogbw);
+  }
+  float __denom = __c * __c + __d * __d;
+  float _Complex z;
+  __real__(z) = _SCALBNf((__a * __c + __b * __d) / __denom, -__ilogbw);
+  __imag__(z) = _SCALBNf((__b * __c - __a * __d) / __denom, -__ilogbw);
+  if (_ISNANf(__real__(z)) && _ISNANf(__imag__(z))) {
+    if ((__denom == 0) && (!_ISNANf(__a) || !_ISNANf(__b))) {
+      __real__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __a;
+      __imag__(z) = _COPYSIGNf(__builtin_huge_valf(), __c) * __b;
+    } else if ((_ISINFf(__a) || _ISINFf(__b)) && _ISFINITEf(__c) &&
+               _ISFINITEf(__d)) {
+      __a = _COPYSIGNf(_ISINFf(__a) ? 1 : 0, __a);
+      __b = _COPYSIGNf(_ISINFf(__b) ? 1 : 0, __b);
+      __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
+      __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
+    } else if (_ISINFf(__logbw) && __logbw > 0 && _ISFINITEf(__a) &&
+               _ISFINITEf(__b)) {
+      __c = _COPYSIGNf(_ISINFf(__c) ? 1 : 0, __c);
+      __d = _COPYSIGNf(_ISINFf(__d) ? 1 : 0, __d);
+      __real__(z) = 0 * (__a * __c + __b * __d);
+      __imag__(z) = 0 * (__b * __c - __a * __d);
+    }
+  }
+  return z;
+}
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+
+#undef _ISNANd
+#undef _ISNANf
+#undef _ISINFd
+#undef _ISINFf
+#undef _COPYSIGNd
+#undef _COPYSIGNf
+#undef _ISFINITEd
+#undef _ISFINITEf
+#undef _SCALBNd
+#undef _SCALBNf
+#undef _ABSd
+#undef _ABSf
+#undef _LOGBd
+#undef _LOGBf
+
+#ifdef _OPENMP
+#pragma omp end declare target
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
+#endif // __CLANG_CUDA_COMPLEX_BUILTINS
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
similarity index 78%
copy from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h
copy to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
index 50ad674..f801e54 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_device_functions.h
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
@@ -10,7 +10,7 @@
 #ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
 #define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
 
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
 #if CUDA_VERSION < 9000
 #error This file is intended to be used with CUDA-9+ only.
 #endif
@@ -20,32 +20,12 @@
 // we implement in this file. We need static in order to avoid emitting unused
 // functions and __forceinline__ helps inlining these wrappers at -O1.
 #pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __attribute__((always_inline))
+#ifdef __OPENMP_NVPTX__
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
 #else
 #define __DEVICE__ static __device__ __forceinline__
 #endif
 
-// libdevice provides fast low-precision and slow full-precision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
 __DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
 __DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
 __DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
@@ -359,10 +339,10 @@
   return __nvvm_atom_add_gen_i(__p, __v);
 }
 __DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {
-  __nvvm_atom_cta_add_gen_i(__p, __v);
+  return __nvvm_atom_cta_add_gen_i(__p, __v);
 }
 __DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {
-  __nvvm_atom_sys_add_gen_i(__p, __v);
+  return __nvvm_atom_sys_add_gen_i(__p, __v);
 }
 __DEVICE__ int __iAtomicAnd(int *__p, int __v) {
   return __nvvm_atom_and_gen_i(__p, __v);
@@ -1483,152 +1463,17 @@
   return r;
 }
 #endif // CUDA_VERSION >= 9020
-__DEVICE__ int abs(int __a) __NOEXCEPT { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) __NOEXCEPT { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-#ifndef _OPENMP
-__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
+
+// For OpenMP we require the user to include <time.h> as we need to know what
+// clock_t is on the system.
+#ifndef __OPENMP_NVPTX__
+__DEVICE__ /* clock_t= */ int clock() { return __nvvm_read_ptx_sreg_clock(); }
+#endif
 __DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
-#endif
-__DEVICE__ double copysign(double __a, double __b) {
-  return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
-  return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
-  return __nv_fast_fdividef(__a, __b);
-#else
-  return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
-  return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
-  return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_llabs(__a); };
-#else
-__DEVICE__ long labs(long __a) __NOEXCEPT { return __nv_abs(__a); };
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) __NOEXCEPT { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
-  return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
-  return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
-#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+
 // These functions shouldn't be declared when including this header
 // for math function resolution purposes.
-#ifndef _OPENMP
+#ifndef __OPENMP_NVPTX__
 __DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
   return __builtin_memcpy(__a, __b, __c);
 }
@@ -1636,158 +1481,6 @@
   return __builtin_memset(__a, __b, __c);
 }
 #endif
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
-  return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
-  return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
-  return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
-  return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
-  return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
-  return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
-  return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
-  return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
-  return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
-  return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
-  return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
-  return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
-  return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
-  return __nv_rhypotf(__a, __b);
-}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
-  return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
-  return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
-  return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
-  return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
-  return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
-  return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-// TODO: remove once variant is supported
-#ifndef _OPENMP
-__DEVICE__ double scalbln(double __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.0 : -0.0;
-  return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.f : -0.f;
-  return scalbnf(__a, (int)__b);
-}
-#endif
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE__ void sincos(double __a, double *__s, double *__c) {
-  return __nv_sincos(__a, __s, __c);
-}
-__DEVICE__ void sincosf(float __a, float *__s, float *__c) {
-  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE__ void sincospi(double __a, double *__s, double *__c) {
-  return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE__ void sincospif(float __a, float *__s, float *__c) {
-  return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
-  return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
-  return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
 
-#undef __NOEXCEPT
 #pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__FAST_OR_SLOW")
 #endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
diff --git a/linux-x86/lib64/clang/11.0.1/include/__clang_cuda_intrinsics.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/__clang_cuda_intrinsics.h
rename to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
copy to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
index 4d70353..6173b58 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_libdevice_declares.h
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
@@ -14,7 +14,7 @@
 extern "C" {
 #endif
 
-#if defined(_OPENMP)
+#if defined(__OPENMP_NVPTX__)
 #define __DEVICE__
 #elif defined(__CUDA__)
 #define __DEVICE__ __device__
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
new file mode 100644
index 0000000..332e616
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
@@ -0,0 +1,347 @@
+/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_CUDA_MATH_H__
+#define __CLANG_CUDA_MATH_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+#ifndef __OPENMP_NVPTX__
+#if CUDA_VERSION < 9000
+#error This file is intended to be used with CUDA-9+ only.
+#endif
+#endif
+
+// __DEVICE__ is a helper macro with a common set of attributes for the
+// wrappers we implement in this file. We need static in order to avoid
+// emitting unused functions, and __forceinline__ helps inline these wrappers
+// at -O1.
+#pragma push_macro("__DEVICE__")
+#ifdef __OPENMP_NVPTX__
+#if defined(__cplusplus)
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __attribute__((always_inline, nothrow))
+#endif
+#else
+#define __DEVICE__ static __device__ __forceinline__
+#endif
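+// Illustrative sketch (not part of the upstream header): after preprocessing,
+// a wrapper such as sin() below reads, in CUDA mode,
+//   static __device__ __forceinline__ double sin(double __a) { return __nv_sin(__a); }
+// and, in the OpenMP C++ overlay,
+//   static constexpr __attribute__((always_inline, nothrow)) double sin(double __a) { ... }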
+
+// Specialized version of __DEVICE__ for functions with a void return type.
+// Needed because the OpenMP overlay requires constexpr functions here, but
+// prior to C++14 void-returning functions could not be constexpr.
+#pragma push_macro("__DEVICE_VOID__")
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
+#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE_VOID__ __DEVICE__
+#endif
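+// Illustrative sketch (not part of the upstream header): before C++14 a
+// constexpr function cannot have a void return type, e.g.
+//   constexpr void f() {}  // ill-formed in C++11, valid from C++14
+// which is why the void sincos-style wrappers below need this fallback.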
+
+// libdevice provides fast low-precision and slow full-precision
+// implementations for some functions. Which one gets selected depends on
+// __CLANG_CUDA_APPROX_TRANSCENDENTALS__, which gets defined by clang if
+// -ffast-math or -fcuda-approx-transcendentals are in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
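+// Illustrative sketch (not part of the upstream header): with approximate
+// transcendentals enabled,
+//   __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a)
+// selects __nv_fast_cosf(__a); otherwise it selects __nv_cosf(__a), as used
+// by cosf() below.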
+
+__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
+__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
+__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
+__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
+__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
+__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
+__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
+__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
+__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
+__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
+__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
+__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
+__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
+__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
+__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
+__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
+__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
+__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
+__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+__DEVICE__ double copysign(double __a, double __b) {
+  return __nv_copysign(__a, __b);
+}
+__DEVICE__ float copysignf(float __a, float __b) {
+  return __nv_copysignf(__a, __b);
+}
+__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
+__DEVICE__ float cosf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
+}
+__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
+__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
+__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
+__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
+__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
+__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
+__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
+__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
+__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
+__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
+__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
+__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
+__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
+__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
+__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
+__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
+__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
+__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
+__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
+__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
+__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
+__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
+__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
+__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
+__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
+__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
+__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
+__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
+__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
+__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
+__DEVICE__ float fdividef(float __a, float __b) {
+#if __FAST_MATH__ && !__CUDA_PREC_DIV
+  return __nv_fast_fdividef(__a, __b);
+#else
+  return __a / __b;
+#endif
+}
+__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
+__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
+__DEVICE__ double fma(double __a, double __b, double __c) {
+  return __nv_fma(__a, __b, __c);
+}
+__DEVICE__ float fmaf(float __a, float __b, float __c) {
+  return __nv_fmaf(__a, __b, __c);
+}
+__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
+__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
+__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
+__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
+__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
+__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
+__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
+__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
+__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
+__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
+__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
+__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
+__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
+__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
+__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
+__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
+__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
+__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long labs(long __a) { return __nv_llabs(__a); }
+#else
+__DEVICE__ long labs(long __a) { return __nv_abs(__a); }
+#endif
+__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
+__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
+__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
+__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
+__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llmax(long long __a, long long __b) {
+  return __nv_llmax(__a, __b);
+}
+__DEVICE__ long long llmin(long long __a, long long __b) {
+  return __nv_llmin(__a, __b);
+}
+__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
+__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
+__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
+__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
+__DEVICE__ double log(double __a) { return __nv_log(__a); }
+__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
+__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
+__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
+__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
+__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
+__DEVICE__ float log2f(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
+}
+__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
+__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
+__DEVICE__ float logf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
+}
+#if defined(__LP64__) || defined(_WIN64)
+__DEVICE__ long lrint(double __a) { return llrint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
+__DEVICE__ long lround(double __a) { return llround(__a); }
+__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
+#else
+__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
+__DEVICE__ long lround(double __a) { return round(__a); }
+__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+#endif
+__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
+__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
+__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
+__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nextafter(double __a, double __b) {
+  return __nv_nextafter(__a, __b);
+}
+__DEVICE__ float nextafterf(float __a, float __b) {
+  return __nv_nextafterf(__a, __b);
+}
+__DEVICE__ double norm(int __dim, const double *__t) {
+  return __nv_norm(__dim, __t);
+}
+__DEVICE__ double norm3d(double __a, double __b, double __c) {
+  return __nv_norm3d(__a, __b, __c);
+}
+__DEVICE__ float norm3df(float __a, float __b, float __c) {
+  return __nv_norm3df(__a, __b, __c);
+}
+__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
+  return __nv_norm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
+  return __nv_norm4df(__a, __b, __c, __d);
+}
+__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
+__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
+__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
+__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
+__DEVICE__ float normf(int __dim, const float *__t) {
+  return __nv_normf(__dim, __t);
+}
+__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
+__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
+__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
+__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
+__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
+__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
+__DEVICE__ double remainder(double __a, double __b) {
+  return __nv_remainder(__a, __b);
+}
+__DEVICE__ float remainderf(float __a, float __b) {
+  return __nv_remainderf(__a, __b);
+}
+__DEVICE__ double remquo(double __a, double __b, int *__c) {
+  return __nv_remquo(__a, __b, __c);
+}
+__DEVICE__ float remquof(float __a, float __b, int *__c) {
+  return __nv_remquof(__a, __b, __c);
+}
+__DEVICE__ double rhypot(double __a, double __b) {
+  return __nv_rhypot(__a, __b);
+}
+__DEVICE__ float rhypotf(float __a, float __b) {
+  return __nv_rhypotf(__a, __b);
+}
+__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+__DEVICE__ double rnorm(int __a, const double *__b) {
+  return __nv_rnorm(__a, __b);
+}
+__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
+  return __nv_rnorm3d(__a, __b, __c);
+}
+__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
+  return __nv_rnorm3df(__a, __b, __c);
+}
+__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
+  return __nv_rnorm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
+  return __nv_rnorm4df(__a, __b, __c, __d);
+}
+__DEVICE__ float rnormf(int __dim, const float *__t) {
+  return __nv_rnormf(__dim, __t);
+}
+__DEVICE__ double round(double __a) { return __nv_round(__a); }
+__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
+__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
+__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
+__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
+__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
+__DEVICE__ double scalbln(double __a, long __b) {
+  if (__b > INT_MAX)
+    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
+  if (__b < INT_MIN)
+    return __a > 0 ? 0.0 : -0.0;
+  return scalbn(__a, (int)__b);
+}
+__DEVICE__ float scalblnf(float __a, long __b) {
+  if (__b > INT_MAX)
+    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
+  if (__b < INT_MIN)
+    return __a > 0 ? 0.f : -0.f;
+  return scalbnf(__a, (int)__b);
+}
+__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
+__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
+  return __nv_sincos(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
+  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
+  return __nv_sincospi(__a, __s, __c);
+}
+__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
+  return __nv_sincospif(__a, __s, __c);
+}
+__DEVICE__ float sinf(float __a) {
+  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
+}
+__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
+__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
+__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
+__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
+__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
+__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
+__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
+__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
+__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
+__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
+__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
+__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
+__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
+__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
+__DEVICE__ unsigned long long ullmax(unsigned long long __a,
+                                     unsigned long long __b) {
+  return __nv_ullmax(__a, __b);
+}
+__DEVICE__ unsigned long long ullmin(unsigned long long __a,
+                                     unsigned long long __b) {
+  return __nv_ullmin(__a, __b);
+}
+__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
+  return __nv_umax(__a, __b);
+}
+__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
+  return __nv_umin(__a, __b);
+}
+__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
+__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
+__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
+__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
+__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
+__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__DEVICE_VOID__")
+#pragma pop_macro("__FAST_OR_SLOW")
+
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
similarity index 87%
copy from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
copy to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
index 0afe4db..8a27085 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_math_forward_declares.h
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
@@ -8,8 +8,8 @@
  */
 #ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
 #define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
+#if !defined(__CUDA__) && !__HIP__
+#error "This file is for CUDA/HIP compilation only."
 #endif
 
 // This file forward-declares some math functions we (or the CUDA headers)
@@ -20,37 +20,14 @@
 // would preclude the use of our own __device__ overloads for these functions.
 
 #pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
-#define __DEVICE__ static __inline__ __attribute__((always_inline))
-#else
 #define __DEVICE__                                                             \
   static __inline__ __attribute__((always_inline)) __attribute__((device))
-#endif
 
-// For C++ 17 we need to include noexcept attribute to be compatible
-// with the header-defined version. This may be removed once
-// variant is supported.
-#if defined(_OPENMP) && defined(__cplusplus) && __cplusplus >= 201703L
-#define __NOEXCEPT noexcept
-#else
-#define __NOEXCEPT
-#endif
-
-#if !(defined(_OPENMP) && defined(__cplusplus))
 __DEVICE__ long abs(long);
 __DEVICE__ long long abs(long long);
 __DEVICE__ double abs(double);
 __DEVICE__ float abs(float);
-#endif
-// While providing the CUDA declarations and definitions for math functions,
-// we may manually define additional functions.
-// TODO: Once variant is supported the additional functions will have
-// to be removed.
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ const double abs(const double);
-__DEVICE__ const float abs(const float);
-#endif
-__DEVICE__ int abs(int) __NOEXCEPT;
+__DEVICE__ int abs(int);
 __DEVICE__ double acos(double);
 __DEVICE__ float acos(float);
 __DEVICE__ double acosh(double);
@@ -85,8 +62,8 @@
 __DEVICE__ float exp(float);
 __DEVICE__ double expm1(double);
 __DEVICE__ float expm1(float);
-__DEVICE__ double fabs(double) __NOEXCEPT;
-__DEVICE__ float fabs(float) __NOEXCEPT;
+__DEVICE__ double fabs(double);
+__DEVICE__ float fabs(float);
 __DEVICE__ double fdim(double, double);
 __DEVICE__ float fdim(float, float);
 __DEVICE__ double floor(double);
@@ -136,12 +113,12 @@
 __DEVICE__ bool isnormal(float);
 __DEVICE__ bool isunordered(double, double);
 __DEVICE__ bool isunordered(float, float);
-__DEVICE__ long labs(long) __NOEXCEPT;
+__DEVICE__ long labs(long);
 __DEVICE__ double ldexp(double, int);
 __DEVICE__ float ldexp(float, int);
 __DEVICE__ double lgamma(double);
 __DEVICE__ float lgamma(float);
-__DEVICE__ long long llabs(long long) __NOEXCEPT;
+__DEVICE__ long long llabs(long long);
 __DEVICE__ long long llrint(double);
 __DEVICE__ long long llrint(float);
 __DEVICE__ double log10(double);
@@ -152,9 +129,6 @@
 __DEVICE__ float log2(float);
 __DEVICE__ double logb(double);
 __DEVICE__ float logb(float);
-#if defined(_OPENMP) && defined(__cplusplus)
-__DEVICE__ long double log(long double);
-#endif
 __DEVICE__ double log(double);
 __DEVICE__ float log(float);
 __DEVICE__ long lrint(double);
@@ -302,7 +276,6 @@
 } // namespace std
 #endif
 
-#undef __NOEXCEPT
 #pragma pop_macro("__DEVICE__")
 
 #endif
diff --git a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
similarity index 96%
copy from darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
copy to linux-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
index e91de3c..f43ed55 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/__clang_cuda_runtime_wrapper.h
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
@@ -31,11 +31,17 @@
 // Include some forward declares that must come before cmath.
 #include <__clang_cuda_math_forward_declares.h>
 
+// Define __CUDACC__ early as libstdc++ standard headers with GNU extensions
+// enabled depend on it to avoid using __float128, which is unsupported in
+// CUDA.
+#define __CUDACC__
+
 // Include some standard headers to avoid CUDA headers including them
 // while some required macros (like __THROW) are in a weird state.
 #include <cmath>
 #include <cstdlib>
 #include <stdlib.h>
+#undef __CUDACC__
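+// Illustrative sketch (not part of the upstream header): libstdc++ guards its
+// __float128 support with checks along the lines of
+//   #if defined(_GLIBCXX_USE_FLOAT128) && !defined(__CUDACC__)
+// so defining __CUDACC__ around these includes keeps __float128 out of the
+// device-side compilation.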
 
 // Preserve common macros that will be changed below by us or by CUDA
 // headers.
@@ -83,13 +89,15 @@
 #if CUDA_VERSION < 9000
 #define __CUDABE__
 #else
+#define __CUDACC__
 #define __CUDA_LIBDEVICE__
 #endif
 // Disables definitions of device-side runtime support stubs in
 // cuda_device_runtime_api.h
+#include "host_defines.h"
+#undef __CUDACC__
 #include "driver_types.h"
 #include "host_config.h"
-#include "host_defines.h"
 
 // Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
 // cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
@@ -141,11 +149,12 @@
 // to provide our own.
 #include <__clang_cuda_libdevice_declares.h>
 
-// Wrappers for many device-side standard library functions became compiler
-// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
-// provides its own implementation of the wrappers.
+// Wrappers for many device-side standard library functions, incl. math
+// functions, became compiler builtins in CUDA-9 and have been removed from the
+// CUDA headers. Clang now provides its own implementation of the wrappers.
 #if CUDA_VERSION >= 9000
 #include <__clang_cuda_device_functions.h>
+#include <__clang_cuda_math.h>
 #endif
 
 // __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
new file mode 100644
index 0000000..e1cd49a
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
@@ -0,0 +1,326 @@
+/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+
+extern "C" {
+
+// BEGIN FLOAT
+__device__ __attribute__((const)) float __ocml_acos_f32(float);
+__device__ __attribute__((pure)) float __ocml_acosh_f32(float);
+__device__ __attribute__((const)) float __ocml_asin_f32(float);
+__device__ __attribute__((pure)) float __ocml_asinh_f32(float);
+__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);
+__device__ __attribute__((const)) float __ocml_atan_f32(float);
+__device__ __attribute__((pure)) float __ocml_atanh_f32(float);
+__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_ceil_f32(float);
+__device__ __attribute__((const)) float __ocml_copysign_f32(float, float);
+__device__ float __ocml_cos_f32(float);
+__device__ float __ocml_native_cos_f32(float);
+__device__ __attribute__((pure)) float __ocml_cosh_f32(float);
+__device__ float __ocml_cospi_f32(float);
+__device__ float __ocml_i0_f32(float);
+__device__ float __ocml_i1_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfc_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);
+__device__ __attribute__((pure)) float __ocml_erf_f32(float);
+__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp2_f32(float);
+__device__ __attribute__((pure)) float __ocml_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_exp_f32(float);
+__device__ __attribute__((pure)) float __ocml_expm1_f32(float);
+__device__ __attribute__((const)) float __ocml_fabs_f32(float);
+__device__ __attribute__((const)) float __ocml_fdim_f32(float, float);
+__device__ __attribute__((const)) float __ocml_floor_f32(float);
+__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fmax_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmin_f32(float, float);
+__device__ __attribute__((const)) float __ocml_fmod_f32(float, float);
+__device__ float __ocml_frexp_f32(float,
+                                  __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_hypot_f32(float, float);
+__device__ __attribute__((const)) int __ocml_ilogb_f32(float);
+__device__ __attribute__((const)) int __ocml_isfinite_f32(float);
+__device__ __attribute__((const)) int __ocml_isinf_f32(float);
+__device__ __attribute__((const)) int __ocml_isnan_f32(float);
+__device__ float __ocml_j0_f32(float);
+__device__ float __ocml_j1_f32(float);
+__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int);
+__device__ float __ocml_lgamma_f32(float);
+__device__ __attribute__((pure)) float __ocml_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log10_f32(float);
+__device__ __attribute__((pure)) float __ocml_log1p_f32(float);
+__device__ __attribute__((pure)) float __ocml_log2_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log2_f32(float);
+__device__ __attribute__((const)) float __ocml_logb_f32(float);
+__device__ __attribute__((pure)) float __ocml_log_f32(float);
+__device__ __attribute__((pure)) float __ocml_native_log_f32(float);
+__device__ float __ocml_modf_f32(float,
+                                 __attribute__((address_space(5))) float *);
+__device__ __attribute__((const)) float __ocml_nearbyint_f32(float);
+__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float);
+__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,
+                                                        float);
+__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
+__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
+__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
+__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
+__device__ float __ocml_remquo_f32(float, float,
+                                   __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float);
+__device__ __attribute__((const)) float __ocml_rint_f32(float);
+__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float,
+                                                         float);
+__device__ __attribute__((const)) float __ocml_round_f32(float);
+__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_scalb_f32(float, float);
+__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int);
+__device__ __attribute__((const)) int __ocml_signbit_f32(float);
+__device__ float __ocml_sincos_f32(float,
+                                   __attribute__((address_space(5))) float *);
+__device__ float __ocml_sincospi_f32(float,
+                                     __attribute__((address_space(5))) float *);
+__device__ float __ocml_sin_f32(float);
+__device__ float __ocml_native_sin_f32(float);
+__device__ __attribute__((pure)) float __ocml_sinh_f32(float);
+__device__ float __ocml_sinpi_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_f32(float);
+__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float);
+__device__ float __ocml_tan_f32(float);
+__device__ __attribute__((pure)) float __ocml_tanh_f32(float);
+__device__ float __ocml_tgamma_f32(float);
+__device__ __attribute__((const)) float __ocml_trunc_f32(float);
+__device__ float __ocml_y0_f32(float);
+__device__ float __ocml_y1_f32(float);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
+__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float);
+__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
+__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
+
+__device__ __attribute__((const)) float
+__llvm_amdgcn_cos_f32(float) __asm("llvm.amdgcn.cos.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rcp_f32(float) __asm("llvm.amdgcn.rcp.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_rsq_f32(float) __asm("llvm.amdgcn.rsq.f32");
+__device__ __attribute__((const)) float
+__llvm_amdgcn_sin_f32(float) __asm("llvm.amdgcn.sin.f32");
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__device__ __attribute__((const)) double __ocml_acos_f64(double);
+__device__ __attribute__((pure)) double __ocml_acosh_f64(double);
+__device__ __attribute__((const)) double __ocml_asin_f64(double);
+__device__ __attribute__((pure)) double __ocml_asinh_f64(double);
+__device__ __attribute__((const)) double __ocml_atan2_f64(double, double);
+__device__ __attribute__((const)) double __ocml_atan_f64(double);
+__device__ __attribute__((pure)) double __ocml_atanh_f64(double);
+__device__ __attribute__((pure)) double __ocml_cbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_ceil_f64(double);
+__device__ __attribute__((const)) double __ocml_copysign_f64(double, double);
+__device__ double __ocml_cos_f64(double);
+__device__ __attribute__((pure)) double __ocml_cosh_f64(double);
+__device__ double __ocml_cospi_f64(double);
+__device__ double __ocml_i0_f64(double);
+__device__ double __ocml_i1_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfc_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfcx_f64(double);
+__device__ __attribute__((pure)) double __ocml_erf_f64(double);
+__device__ __attribute__((pure)) double __ocml_erfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp10_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp2_f64(double);
+__device__ __attribute__((pure)) double __ocml_exp_f64(double);
+__device__ __attribute__((pure)) double __ocml_expm1_f64(double);
+__device__ __attribute__((const)) double __ocml_fabs_f64(double);
+__device__ __attribute__((const)) double __ocml_fdim_f64(double, double);
+__device__ __attribute__((const)) double __ocml_floor_f64(double);
+__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double);
+__device__ __attribute__((const)) double __ocml_fmax_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmin_f64(double, double);
+__device__ __attribute__((const)) double __ocml_fmod_f64(double, double);
+__device__ double __ocml_frexp_f64(double,
+                                   __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_hypot_f64(double, double);
+__device__ __attribute__((const)) int __ocml_ilogb_f64(double);
+__device__ __attribute__((const)) int __ocml_isfinite_f64(double);
+__device__ __attribute__((const)) int __ocml_isinf_f64(double);
+__device__ __attribute__((const)) int __ocml_isnan_f64(double);
+__device__ double __ocml_j0_f64(double);
+__device__ double __ocml_j1_f64(double);
+__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int);
+__device__ double __ocml_lgamma_f64(double);
+__device__ __attribute__((pure)) double __ocml_log10_f64(double);
+__device__ __attribute__((pure)) double __ocml_log1p_f64(double);
+__device__ __attribute__((pure)) double __ocml_log2_f64(double);
+__device__ __attribute__((const)) double __ocml_logb_f64(double);
+__device__ __attribute__((pure)) double __ocml_log_f64(double);
+__device__ double __ocml_modf_f64(double,
+                                  __attribute__((address_space(5))) double *);
+__device__ __attribute__((const)) double __ocml_nearbyint_f64(double);
+__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double);
+__device__ __attribute__((const)) double __ocml_len3_f64(double, double,
+                                                         double);
+__device__ __attribute__((const)) double __ocml_len4_f64(double, double, double,
+                                                         double);
+__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
+__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
+__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
+__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
+__device__ double __ocml_remquo_f64(double, double,
+                                    __attribute__((address_space(5))) int *);
+__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double);
+__device__ __attribute__((const)) double __ocml_rint_f64(double);
+__device__ __attribute__((const)) double __ocml_rlen3_f64(double, double,
+                                                          double);
+__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double,
+                                                          double, double);
+__device__ __attribute__((const)) double __ocml_round_f64(double);
+__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double);
+__device__ __attribute__((const)) double __ocml_scalb_f64(double, double);
+__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int);
+__device__ __attribute__((const)) int __ocml_signbit_f64(double);
+__device__ double __ocml_sincos_f64(double,
+                                    __attribute__((address_space(5))) double *);
+__device__ double
+__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *);
+__device__ double __ocml_sin_f64(double);
+__device__ __attribute__((pure)) double __ocml_sinh_f64(double);
+__device__ double __ocml_sinpi_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_f64(double);
+__device__ double __ocml_tan_f64(double);
+__device__ __attribute__((pure)) double __ocml_tanh_f64(double);
+__device__ double __ocml_tgamma_f64(double);
+__device__ __attribute__((const)) double __ocml_trunc_f64(double);
+__device__ double __ocml_y0_f64(double);
+__device__ double __ocml_y1_f64(double);
+
+// BEGIN INTRINSICS
+__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
+__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);
+__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
+                                                            double);
+__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
+                                                            double);
+__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
+                                                            double);
+__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
+                                                            double);
+
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rcp_f64(double) __asm("llvm.amdgcn.rcp.f64");
+__device__ __attribute__((const)) double
+__llvm_amdgcn_rsq_f64(double) __asm("llvm.amdgcn.rsq.f64");
+
+__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
+__device__ _Float16 __ocml_cos_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,
+                                                          _Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);
+__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
+__device__ _Float16 __ocml_sin_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+
+typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
+typedef short __2i16 __attribute__((ext_vector_type(2)));
+
+__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
+                                                     float c, bool s);
+__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
+__device__ __2f16 __ocml_cos_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16);
+__device__ __attribute__((const))
+__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16);
+__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
+__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+__device__ inline __2f16
+__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+{
+  return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+}
+__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
+__device__ __2f16 __ocml_sin_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+
+} // extern "C"
+
+#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_math.h b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
new file mode 100644
index 0000000..cf7014b
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
@@ -0,0 +1,1185 @@
+/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#include <algorithm>
+#include <limits.h>
+#include <limits>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#pragma push_macro("__RETURN_TYPE")
+
+// To be consistent with __clang_cuda_math_forward_declares.
+#define __DEVICE__ static __device__
+#define __RETURN_TYPE bool
+
+__DEVICE__
+inline uint64_t __make_mantissa_base8(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '7')
+      __r = (__r * 8u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base10(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 10u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa_base16(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 16u) + __tmp - '0';
+    else if (__tmp >= 'a' && __tmp <= 'f')
+      __r = (__r * 16u) + __tmp - 'a' + 10;
+    else if (__tmp >= 'A' && __tmp <= 'F')
+      __r = (__r * 16u) + __tmp - 'A' + 10;
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+inline uint64_t __make_mantissa(const char *__tagp) {
+  if (!__tagp)
+    return 0u;
+
+  if (*__tagp == '0') {
+    ++__tagp;
+
+    if (*__tagp == 'x' || *__tagp == 'X')
+      return __make_mantissa_base16(__tagp + 1); // skip the 'x'/'X'
+    else
+      return __make_mantissa_base8(__tagp);
+  }
+
+  return __make_mantissa_base10(__tagp);
+}
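+// Illustrative note (not part of the upstream header): __make_mantissa
+// parses the optional tag string accepted by nan()/nanf(), choosing the
+// radix from the usual C prefix rules, e.g.:
+//   __make_mantissa("123")  -> 123 (decimal)
+//   __make_mantissa("0x1f") -> 31  (hexadecimal)
+//   __make_mantissa("017")  -> 15  (octal)
+// Any character outside the chosen digit set makes the whole tag parse as 0.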
+
+// BEGIN FLOAT
+__DEVICE__
+inline float abs(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float acosf(float __x) { return __ocml_acos_f32(__x); }
+__DEVICE__
+inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+__DEVICE__
+inline float asinf(float __x) { return __ocml_asin_f32(__x); }
+__DEVICE__
+inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+__DEVICE__
+inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+__DEVICE__
+inline float atanf(float __x) { return __ocml_atan_f32(__x); }
+__DEVICE__
+inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+__DEVICE__
+inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+__DEVICE__
+inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+__DEVICE__
+inline float copysignf(float __x, float __y) {
+  return __ocml_copysign_f32(__x, __y);
+}
+__DEVICE__
+inline float cosf(float __x) { return __ocml_cos_f32(__x); }
+__DEVICE__
+inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
+__DEVICE__
+inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+__DEVICE__
+inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+__DEVICE__
+inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+__DEVICE__
+inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+__DEVICE__
+inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+__DEVICE__
+inline float erff(float __x) { return __ocml_erf_f32(__x); }
+__DEVICE__
+inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+__DEVICE__
+inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+__DEVICE__
+inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+__DEVICE__
+inline float expf(float __x) { return __ocml_exp_f32(__x); }
+__DEVICE__
+inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+__DEVICE__
+inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+__DEVICE__
+inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+__DEVICE__
+inline float fdividef(float __x, float __y) { return __x / __y; }
+__DEVICE__
+inline float floorf(float __x) { return __ocml_floor_f32(__x); }
+__DEVICE__
+inline float fmaf(float __x, float __y, float __z) {
+  return __ocml_fma_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+__DEVICE__
+inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+__DEVICE__
+inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+__DEVICE__
+inline float frexpf(float __x, int *__nptr) {
+  int __tmp;
+  float __r =
+      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+
+  return __r;
+}
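+// Note: OCML takes its out-parameters in the AMDGPU private address space
+// (address_space(5)), while the public prototype uses a generic pointer; the
+// local temporary plus cast above bridges the two. Example values, per the
+// usual frexp contract: frexpf(6.0f, &__e) returns 0.75f with __e == 3,
+// since 0.75f * 2^3 == 6.0f.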
+__DEVICE__
+inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+__DEVICE__
+inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
+__DEVICE__
+inline float j0f(float __x) { return __ocml_j0_f32(__x); }
+__DEVICE__
+inline float j1f(float __x) { return __ocml_j1_f32(__x); }
+__DEVICE__
+inline float jnf(int __n,
+                 float __x) { // TODO: we could use Ahmes multiplication
+                              // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case.
+  if (__n == 0)
+    return j0f(__x);
+  if (__n == 1)
+    return j1f(__x);
+
+  float __x0 = j0f(__x);
+  float __x1 = j1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
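+// The loop above evaluates the standard three-term recurrence for Bessel
+// functions of the first kind,
+//   J_{n+1}(x) = (2n / x) * J_n(x) - J_{n-1}(x),
+// seeded with J_0(x) and J_1(x), so jnf(n, x) costs n - 1 iterations.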
+__DEVICE__
+inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+__DEVICE__
+inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+__DEVICE__
+inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float log10f(float __x) { return __ocml_log10_f32(__x); }
+__DEVICE__
+inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+__DEVICE__
+inline float log2f(float __x) { return __ocml_log2_f32(__x); }
+__DEVICE__
+inline float logbf(float __x) { return __ocml_logb_f32(__x); }
+__DEVICE__
+inline float logf(float __x) { return __ocml_log_f32(__x); }
+__DEVICE__
+inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float modff(float __x, float *__iptr) {
+  float __tmp;
+  float __r =
+      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__iptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline float nanf(const char *__tagp) {
+  union {
+    float val;
+    struct ieee_float {
+      uint32_t mantissa : 22;
+      uint32_t quiet : 1;
+      uint32_t exponent : 8;
+      uint32_t sign : 1;
+    } bits;
+
+    static_assert(sizeof(float) == sizeof(ieee_float), "");
+  } __tmp;
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+}
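+// Worked example (illustrative): nanf("0x7") produces a quiet NaN carrying
+// payload 7, i.e. bit pattern 0x7FC00007 (sign 0, exponent all ones, quiet
+// bit set, low mantissa bits 0x7).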
+__DEVICE__
+inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+__DEVICE__
+inline float nextafterf(float __x, float __y) {
+  return __ocml_nextafter_f32(__x, __y);
+}
+__DEVICE__
+inline float norm3df(float __x, float __y, float __z) {
+  return __ocml_len3_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float norm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_len4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+__DEVICE__
+inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+__DEVICE__
+inline float
+normf(int __dim,
+      const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f32(__r);
+}
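+// Example (illustrative): normf computes the Euclidean length of a
+// __dim-element vector, so for float __v[] = {3.0f, 4.0f},
+// normf(2, __v) == 5.0f.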
+__DEVICE__
+inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+__DEVICE__
+inline float remainderf(float __x, float __y) {
+  return __ocml_remainder_f32(__x, __y);
+}
+__DEVICE__
+inline float remquof(float __x, float __y, int *__quo) {
+  int __tmp;
+  float __r = __ocml_remquo_f32(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline float rhypotf(float __x, float __y) {
+  return __ocml_rhypot_f32(__x, __y);
+}
+__DEVICE__
+inline float rintf(float __x) { return __ocml_rint_f32(__x); }
+__DEVICE__
+inline float rnorm3df(float __x, float __y, float __z) {
+  return __ocml_rlen3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+inline float rnorm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_rlen4_f32(__x, __y, __z, __w);
+}
+__DEVICE__
+inline float
+rnormf(int __dim,
+       const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f32(__r);
+}
+__DEVICE__
+inline float roundf(float __x) { return __ocml_round_f32(__x); }
+__DEVICE__
+inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+__DEVICE__
+inline float scalblnf(float __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+                         : __ocml_scalb_f32(__x, __n);
+}
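+// Note: __ocml_scalbn_f32 takes an int exponent, so a long exponent that
+// does not fit falls back to the scalb form. For in-range values the two
+// agree, e.g. scalblnf(1.0f, 4L) == 16.0f.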
+__DEVICE__
+inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+__DEVICE__
+inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
+__DEVICE__
+inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+
+  *__sinptr =
+      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+
+  *__sinptr = __ocml_sincospi_f32(
+      __x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline float sinf(float __x) { return __ocml_sin_f32(__x); }
+__DEVICE__
+inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+__DEVICE__
+inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+__DEVICE__
+inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+__DEVICE__
+inline float tanf(float __x) { return __ocml_tan_f32(__x); }
+__DEVICE__
+inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+__DEVICE__
+inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+__DEVICE__
+inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
+__DEVICE__
+inline float y0f(float __x) { return __ocml_y0_f32(__x); }
+__DEVICE__
+inline float y1f(float __x) { return __ocml_y1_f32(__x); }
+__DEVICE__
+inline float ynf(int __n,
+                 float __x) { // TODO: we could use Ahmes multiplication
+                              // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case. Placeholder until OCML adds
+  //       support.
+  if (__n == 0)
+    return y0f(__x);
+  if (__n == 1)
+    return y1f(__x);
+
+  float __x0 = y0f(__x);
+  float __x1 = y1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+__DEVICE__
+inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+__DEVICE__
+inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+__DEVICE__
+inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_rd(float __x, float __y) {
+  return __ocml_add_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fadd_rn(float __x, float __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fadd_ru(float __x, float __y) {
+  return __ocml_add_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fadd_rz(float __x, float __y) {
+  return __ocml_add_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rd(float __x, float __y) {
+  return __ocml_div_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fdiv_ru(float __x, float __y) {
+  return __ocml_div_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fdiv_rz(float __x, float __y) {
+  return __ocml_div_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fdividef(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_rd(float __x, float __y, float __z) {
+  return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline float __fmaf_rn(float __x, float __y, float __z) {
+  return __ocml_fma_f32(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmaf_ru(float __x, float __y, float __z) {
+  return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmaf_rz(float __x, float __y, float __z) {
+  return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+__DEVICE__
+inline float __fmul_rd(float __x, float __y) {
+  return __ocml_mul_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fmul_rn(float __x, float __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fmul_ru(float __x, float __y) {
+  return __ocml_mul_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fmul_rz(float __x, float __y) {
+  return __ocml_mul_rtz_f32(__x, __y);
+}
+__DEVICE__
+inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+__DEVICE__
+inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+#endif
+__DEVICE__
+inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+__DEVICE__
+inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+__DEVICE__
+inline float __fsub_rd(float __x, float __y) {
+  return __ocml_sub_rtn_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __fsub_rn(float __x, float __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline float __fsub_ru(float __x, float __y) {
+  return __ocml_sub_rtp_f32(__x, __y);
+}
+__DEVICE__
+inline float __fsub_rz(float __x, float __y) {
+  return __ocml_sub_rtz_f32(__x, __y);
+}
+#endif
+__DEVICE__
+inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+__DEVICE__
+inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+__DEVICE__
+inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
+__DEVICE__
+inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+__DEVICE__
+inline float __saturatef(float __x) {
+  return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
+}
+__DEVICE__
+inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+  *__sinptr = __ocml_native_sin_f32(__x);
+  *__cosptr = __ocml_native_cos_f32(__x);
+}
+__DEVICE__
+inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+__DEVICE__
+inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__DEVICE__
+inline double abs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double acos(double __x) { return __ocml_acos_f64(__x); }
+__DEVICE__
+inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
+__DEVICE__
+inline double asin(double __x) { return __ocml_asin_f64(__x); }
+__DEVICE__
+inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
+__DEVICE__
+inline double atan(double __x) { return __ocml_atan_f64(__x); }
+__DEVICE__
+inline double atan2(double __x, double __y) {
+  return __ocml_atan2_f64(__x, __y);
+}
+__DEVICE__
+inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
+__DEVICE__
+inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+__DEVICE__
+inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
+__DEVICE__
+inline double copysign(double __x, double __y) {
+  return __ocml_copysign_f64(__x, __y);
+}
+__DEVICE__
+inline double cos(double __x) { return __ocml_cos_f64(__x); }
+__DEVICE__
+inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
+__DEVICE__
+inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+__DEVICE__
+inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+__DEVICE__
+inline double erf(double __x) { return __ocml_erf_f64(__x); }
+__DEVICE__
+inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
+__DEVICE__
+inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+__DEVICE__
+inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+__DEVICE__
+inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+__DEVICE__
+inline double exp(double __x) { return __ocml_exp_f64(__x); }
+__DEVICE__
+inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
+__DEVICE__
+inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
+__DEVICE__
+inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
+__DEVICE__
+inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
+__DEVICE__
+inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+__DEVICE__
+inline double floor(double __x) { return __ocml_floor_f64(__x); }
+__DEVICE__
+inline double fma(double __x, double __y, double __z) {
+  return __ocml_fma_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+__DEVICE__
+inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+__DEVICE__
+inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+__DEVICE__
+inline double frexp(double __x, int *__nptr) {
+  int __tmp;
+  double __r =
+      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline double hypot(double __x, double __y) {
+  return __ocml_hypot_f64(__x, __y);
+}
+__DEVICE__
+inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
+__DEVICE__
+inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
+__DEVICE__
+inline double j0(double __x) { return __ocml_j0_f64(__x); }
+__DEVICE__
+inline double j1(double __x) { return __ocml_j1_f64(__x); }
+__DEVICE__
+inline double jn(int __n,
+                 double __x) { // TODO: we could use Ahmes multiplication
+                               // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case. Placeholder until OCML adds
+  //       support.
+  if (__n == 0)
+    return j0(__x);
+  if (__n == 1)
+    return j1(__x);
+
+  double __x0 = j0(__x);
+  double __x1 = j1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+__DEVICE__
+inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+__DEVICE__
+inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+__DEVICE__
+inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long long int llround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double log(double __x) { return __ocml_log_f64(__x); }
+__DEVICE__
+inline double log10(double __x) { return __ocml_log10_f64(__x); }
+__DEVICE__
+inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
+__DEVICE__
+inline double log2(double __x) { return __ocml_log2_f64(__x); }
+__DEVICE__
+inline double logb(double __x) { return __ocml_logb_f64(__x); }
+__DEVICE__
+inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline long int lround(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double modf(double __x, double *__iptr) {
+  double __tmp;
+  double __r =
+      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__iptr = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline double nan(const char *__tagp) {
+#if !_WIN32
+  union {
+    double val;
+    struct ieee_double {
+      uint64_t mantissa : 51;
+      uint32_t quiet : 1;
+      uint32_t exponent : 11;
+      uint32_t sign : 1;
+    } bits;
+    static_assert(sizeof(double) == sizeof(ieee_double), "");
+  } __tmp;
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+#else
+  static_assert(sizeof(uint64_t) == sizeof(double), "");
+  uint64_t val = __make_mantissa(__tagp);
+  val |= 0xFFFull << 51; // set the 11 exponent bits plus the quiet bit
+  return *reinterpret_cast<double *>(&val);
+#endif
+}
+__DEVICE__
+inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+__DEVICE__
+inline double nextafter(double __x, double __y) {
+  return __ocml_nextafter_f64(__x, __y);
+}
+__DEVICE__
+inline double
+norm(int __dim,
+     const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f64(__r);
+}
+__DEVICE__
+inline double norm3d(double __x, double __y, double __z) {
+  return __ocml_len3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double norm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_len4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+__DEVICE__
+inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+__DEVICE__
+inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+__DEVICE__
+inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+__DEVICE__
+inline double remainder(double __x, double __y) {
+  return __ocml_remainder_f64(__x, __y);
+}
+__DEVICE__
+inline double remquo(double __x, double __y, int *__quo) {
+  int __tmp;
+  double __r = __ocml_remquo_f64(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+__DEVICE__
+inline double rhypot(double __x, double __y) {
+  return __ocml_rhypot_f64(__x, __y);
+}
+__DEVICE__
+inline double rint(double __x) { return __ocml_rint_f64(__x); }
+__DEVICE__
+inline double
+rnorm(int __dim,
+      const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f64(__r);
+}
+__DEVICE__
+inline double rnorm3d(double __x, double __y, double __z) {
+  return __ocml_rlen3_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double rnorm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_rlen4_f64(__x, __y, __z, __w);
+}
+__DEVICE__
+inline double round(double __x) { return __ocml_round_f64(__x); }
+__DEVICE__
+inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+__DEVICE__
+inline double scalbln(double __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+                         : __ocml_scalb_f64(__x, __n);
+}
+__DEVICE__
+inline double scalbn(double __x, int __n) {
+  return __ocml_scalbn_f64(__x, __n);
+}
+__DEVICE__
+inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
+__DEVICE__
+inline double sin(double __x) { return __ocml_sin_f64(__x); }
+__DEVICE__
+inline void sincos(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincos_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincospi_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+__DEVICE__
+inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
+__DEVICE__
+inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+__DEVICE__
+inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+__DEVICE__
+inline double tan(double __x) { return __ocml_tan_f64(__x); }
+__DEVICE__
+inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
+__DEVICE__
+inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+__DEVICE__
+inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
+__DEVICE__
+inline double y0(double __x) { return __ocml_y0_f64(__x); }
+__DEVICE__
+inline double y1(double __x) { return __ocml_y1_f64(__x); }
+__DEVICE__
+inline double yn(int __n,
+                 double __x) { // TODO: we could use Ahmes multiplication
+                               // and the Miller & Brown algorithm
+  //       for linear recurrences to get O(log n) steps, but it's unclear if
+  //       it'd be beneficial in this case. Placeholder until OCML adds
+  //       support.
+  if (__n == 0)
+    return y0(__x);
+  if (__n == 1)
+    return y1(__x);
+
+  double __x0 = y0(__x);
+  double __x1 = y1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_rd(double __x, double __y) {
+  return __ocml_add_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dadd_rn(double __x, double __y) { return __x + __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dadd_ru(double __x, double __y) {
+  return __ocml_add_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dadd_rz(double __x, double __y) {
+  return __ocml_add_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rd(double __x, double __y) {
+  return __ocml_div_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __ddiv_ru(double __x, double __y) {
+  return __ocml_div_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __ddiv_rz(double __x, double __y) {
+  return __ocml_div_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rd(double __x, double __y) {
+  return __ocml_mul_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dmul_rn(double __x, double __y) { return __x * __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dmul_ru(double __x, double __y) {
+  return __ocml_mul_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dmul_rz(double __x, double __y) {
+  return __ocml_mul_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#endif
+__DEVICE__
+inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+#endif
+__DEVICE__
+inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+__DEVICE__
+inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+__DEVICE__
+inline double __dsub_rd(double __x, double __y) {
+  return __ocml_sub_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+inline double __dsub_rn(double __x, double __y) { return __x - __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __dsub_ru(double __x, double __y) {
+  return __ocml_sub_rtp_f64(__x, __y);
+}
+__DEVICE__
+inline double __dsub_rz(double __x, double __y) {
+  return __ocml_sub_rtz_f64(__x, __y);
+}
+__DEVICE__
+inline double __fma_rd(double __x, double __y, double __z) {
+  return __ocml_fma_rtn_f64(__x, __y, __z);
+}
+#endif
+__DEVICE__
+inline double __fma_rn(double __x, double __y, double __z) {
+  return __ocml_fma_f64(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+inline double __fma_ru(double __x, double __y, double __z) {
+  return __ocml_fma_rtp_f64(__x, __y, __z);
+}
+__DEVICE__
+inline double __fma_rz(double __x, double __y, double __z) {
+  return __ocml_fma_rtz_f64(__x, __y, __z);
+}
+#endif
+// END INTRINSICS
+// END DOUBLE
+
+// BEGIN INTEGER
+__DEVICE__
+inline int abs(int __x) {
+  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long labs(long __x) {
+  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+inline long long llabs(long long __x) {
+  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
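+// The branchless pattern above relies on arithmetic right shift: __sgn is 0
+// for non-negative __x and all ones (-1) otherwise, and (__x ^ -1) - (-1)
+// equals ~__x + 1, the two's-complement negation. Worked example with
+// 32-bit int: abs(-5) -> __sgn == -1 -> (-5 ^ -1) - (-1) == 4 + 1 == 5.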
+
+#if defined(__cplusplus)
+__DEVICE__
+inline long abs(long __x) { return labs(__x); }
+__DEVICE__
+inline long long abs(long long __x) { return llabs(__x); }
+#endif
+// END INTEGER
+
+__DEVICE__
+inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+  return __ocml_fma_f16(__x, __y, __z);
+}
+
+__DEVICE__
+inline float fma(float __x, float __y, float __z) {
+  return fmaf(__x, __y, __z);
+}
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUNI")
+#pragma push_macro("__DEF_FLOAT_FUN2I")
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function which returns __T if __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid compilation errors due to ambiguity, e.g. floor(5) is resolved to
+// floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn)                                         \
+  template <typename __T>                                                      \
+  __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,    \
+                                      __retty>::type                           \
+  __fn(__T __x) {                                                              \
+    return ::__fn((double)__x);                                                \
+  }
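+// For instance, __HIP_OVERLOAD1(double, floor) expands (roughly) to
+//   template <typename __T>
+//   __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,
+//                                       double>::type
+//   floor(__T __x) { return ::floor((double)__x); }
+// so floor(5) instantiates with __T = int and dispatches to floor(double).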
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer arguments to avoid compilation errors due to ambiguity, e.g.
+// max(5.0f, 6.0) is resolved to max(double, double).
+#define __HIP_OVERLOAD2(__retty, __fn)                                         \
+  template <typename __T1, typename __T2>                                      \
+  __DEVICE__                                                                   \
+      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
+                                   std::numeric_limits<__T2>::is_specialized,  \
+                               __retty>::type                                  \
+      __fn(__T1 __x, __T2 __y) {                                               \
+    return __fn((double)__x, (double)__y);                                     \
+  }
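+// Likewise, max(5.0f, 6.0) instantiates __T1 = float, __T2 = double, and
+// both arguments are widened to double before calling max(double, double).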
+
+// Define cmath functions that take a float argument and return float.
+#define __DEF_FUN1(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  inline float __func(float __x) { return __func##f(__x); }                    \
+  __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions that take a float argument and return __retty.
+#define __DEF_FUNI(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  inline __retty __func(float __x) { return __func##f(__x); }                  \
+  __HIP_OVERLOAD1(__retty, __func)
+
+// Define cmath functions that take two float arguments.
+#define __DEF_FUN2(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  inline float __func(float __x, float __y) { return __func##f(__x, __y); }    \
+  __HIP_OVERLOAD2(__retty, __func)
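+// As an illustration, __DEF_FUN1(double, sin) defines
+//   inline float sin(float __x) { return sinf(__x); }
+// and then emits __HIP_OVERLOAD1(double, sin), which is how the overload
+// sets invoked below provide float, double, and integral argument forms.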
+
+__DEF_FUN1(double, acos)
+__DEF_FUN1(double, acosh)
+__DEF_FUN1(double, asin)
+__DEF_FUN1(double, asinh)
+__DEF_FUN1(double, atan)
+__DEF_FUN2(double, atan2);
+__DEF_FUN1(double, atanh)
+__DEF_FUN1(double, cbrt)
+__DEF_FUN1(double, ceil)
+__DEF_FUN2(double, copysign);
+__DEF_FUN1(double, cos)
+__DEF_FUN1(double, cosh)
+__DEF_FUN1(double, erf)
+__DEF_FUN1(double, erfc)
+__DEF_FUN1(double, exp)
+__DEF_FUN1(double, exp2)
+__DEF_FUN1(double, expm1)
+__DEF_FUN1(double, fabs)
+__DEF_FUN2(double, fdim);
+__DEF_FUN1(double, floor)
+__DEF_FUN2(double, fmax);
+__DEF_FUN2(double, fmin);
+__DEF_FUN2(double, fmod);
+//__HIP_OVERLOAD1(int, fpclassify)
+__DEF_FUN2(double, hypot);
+__DEF_FUNI(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater);
+__HIP_OVERLOAD2(bool, isgreaterequal);
+__HIP_OVERLOAD1(bool, isinf);
+__HIP_OVERLOAD2(bool, isless);
+__HIP_OVERLOAD2(bool, islessequal);
+__HIP_OVERLOAD2(bool, islessgreater);
+__HIP_OVERLOAD1(bool, isnan);
+//__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered);
+__DEF_FUN1(double, lgamma)
+__DEF_FUN1(double, log)
+__DEF_FUN1(double, log10)
+__DEF_FUN1(double, log1p)
+__DEF_FUN1(double, log2)
+__DEF_FUN1(double, logb)
+__DEF_FUNI(long long, llrint)
+__DEF_FUNI(long long, llround)
+__DEF_FUNI(long, lrint)
+__DEF_FUNI(long, lround)
+__DEF_FUN1(double, nearbyint);
+__DEF_FUN2(double, nextafter);
+__DEF_FUN2(double, pow);
+__DEF_FUN2(double, remainder);
+__DEF_FUN1(double, rint);
+__DEF_FUN1(double, round);
+__HIP_OVERLOAD1(bool, signbit)
+__DEF_FUN1(double, sin)
+__DEF_FUN1(double, sinh)
+__DEF_FUN1(double, sqrt)
+__DEF_FUN1(double, tan)
+__DEF_FUN1(double, tanh)
+__DEF_FUN1(double, tgamma)
+__DEF_FUN1(double, trunc);
+
+// Define cmath functions that take a float and an integer argument.
+#define __DEF_FLOAT_FUN2I(__func)                                              \
+  __DEVICE__                                                                   \
+  inline float __func(float __x, int __y) { return __func##f(__x, __y); }
+__DEF_FLOAT_FUN2I(scalbn)
+
+template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+
+template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__ inline int min(int __arg1, int __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+__DEVICE__ inline int max(int __arg1, int __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__
+inline float max(float __x, float __y) { return fmaxf(__x, __y); }
+
+__DEVICE__
+inline double max(double __x, double __y) { return fmax(__x, __y); }
+
+__DEVICE__
+inline float min(float __x, float __y) { return fminf(__x, __y); }
+
+__DEVICE__
+inline double min(double __x, double __y) { return fmin(__x, __y); }
+
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+__host__ inline static int min(int __arg1, int __arg2) {
+  return std::min(__arg1, __arg2);
+}
+
+__host__ inline static int max(int __arg1, int __arg2) {
+  return std::max(__arg1, __arg2);
+}
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUNI")
+#pragma pop_macro("__DEF_FLOAT_FUN2I")
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__RETURN_TYPE")
+
+#endif // __CLANG_HIP_MATH_H__
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
new file mode 100644
index 0000000..addae56
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
@@ -0,0 +1,64 @@
+/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * WARNING: This header is intended to be directly -include'd by
+ * the compiler and is not supposed to be included by users.
+ *
+ */
+
+#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
+#define __CLANG_HIP_RUNTIME_WRAPPER_H__
+
+#if __HIP__
+
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+
+#define __host__ __attribute__((host))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+#if __HIP_ENABLE_DEVICE_MALLOC__
+extern "C" __device__ void *__hip_malloc(size_t __size);
+extern "C" __device__ void *__hip_free(void *__ptr);
+static inline __device__ void *malloc(size_t __size) {
+  return __hip_malloc(__size);
+}
+static inline __device__ void *free(void *__ptr) { return __hip_free(__ptr); }
+#else
+static inline __device__ void *malloc(size_t __size) {
+  __builtin_trap();
+  return nullptr;
+}
+static inline __device__ void *free(void *__ptr) {
+  __builtin_trap();
+  return nullptr;
+}
+#endif
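+// Usage sketch (illustrative only; the kernel and names below are not part
+// of this header): with __HIP_ENABLE_DEVICE_MALLOC__ defined, device code
+// may heap-allocate through the HIP runtime:
+//   __global__ void kern(int **__out) {
+//     *__out = (int *)malloc(sizeof(int)); // forwards to __hip_malloc
+//   }
+// Without it, any device-side malloc/free call traps via __builtin_trap().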
+
+#include <__clang_hip_libdevice_declares.h>
+#include <__clang_hip_math.h>
+
+#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+#include <__clang_cuda_math_forward_declares.h>
+#include <__clang_cuda_complex_builtins.h>
+
+#include <algorithm>
+#include <complex>
+#include <new>
+#endif // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+
+#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
+
+#endif // __HIP__
+#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
diff --git a/linux-x86/lib64/clang/11.0.1/include/__stddef_max_align_t.h b/linux-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/__stddef_max_align_t.h
rename to linux-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/__wmmintrin_aes.h b/linux-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/__wmmintrin_aes.h
rename to linux-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/__wmmintrin_pclmul.h b/linux-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/__wmmintrin_pclmul.h
rename to linux-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/adxintrin.h b/linux-x86/lib64/clang/11.0.5/include/adxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/adxintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/altivec.h b/linux-x86/lib64/clang/11.0.5/include/altivec.h
similarity index 97%
copy from darwin-x86/lib64/clang/11.0.1/include/altivec.h
copy to linux-x86/lib64/clang/11.0.5/include/altivec.h
index 7e231a2..9a40092 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/altivec.h
+++ b/linux-x86/lib64/clang/11.0.5/include/altivec.h
@@ -16761,6 +16761,394 @@
 static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
   return __builtin_altivec_vminsb(__a, -__a);
 }
+
+#ifdef __POWER10_VECTOR__
+/* vec_pdep */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pdep(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vpdepd(__a, __b);
+}
+
+/* vec_pext */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vpextd(__a, __b);
+}
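+// Illustrative semantics (assuming vpdepd/vpextd behave like the scalar
+// BMI2 pdep/pext on each doubleword); with mask __b = 0b1110 in one lane:
+//   vec_pdep: source 0b0101 scatters its low bits into the mask positions,
+//             giving 0b1010;
+//   vec_pext: source 0b1010 packs the mask-selected bits back down,
+//             giving 0b0101.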
+
+/* vec_cfuge */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vcfuged(__a, __b);
+}
+
+/* vec_gnb */
+
+#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
+
+/* vec_ternarylogic */
+#ifdef __VSX__
+#define vec_ternarylogic(__a, __b, __c, __imm)                                 \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned short                                             \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned int                                               \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned long long                                         \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)),  \
+             vector unsigned __int128                                          \
+           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
+                                  (vector unsigned long long)(__b),            \
+                                  (vector unsigned long long)(__c), (__imm)))
+#endif /* __VSX__ */
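+// Illustrative note (assuming xxeval's immediate acts as an 8-bit truth
+// table, analogous to x86 vpternlog): bit k of __imm supplies the result
+// for the input combination with __a as bit 2, __b as bit 1, and __c as
+// bit 0 of k. For example, __imm == 0x96 computes __a ^ __b ^ __c per bit,
+// and __imm == 0xE8 the bitwise majority of the three operands.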
+
+/* vec_genpcvm */
+
+#ifdef __VSX__
+#define vec_genpcvm(__a, __imm)                                                \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)),                    \
+             vector unsigned short                                             \
+           : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)),                    \
+             vector unsigned int                                               \
+           : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)),                    \
+             vector unsigned long long                                         \
+           : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+#endif /* __VSX__ */
+
+/* vec_clrl */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrl(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrrb(__a, __n);
+#else
+  return __builtin_altivec_vclrlb(__a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrl(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#else
+  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_clrr */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_clrr(vector signed char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrlb(__a, __n);
+#else
+  return __builtin_altivec_vclrrb(__a, __n);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_clrr(vector unsigned char __a, unsigned int __n) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+#else
+  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+#endif
+}
+
+/* vec_cntlzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vclzdm(__a, __b);
+}
+
+/* vec_cnttzm */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vctzdm(__a, __b);
+}
+
+/* vec_sldb */
+
+#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
+
+/* vec_srdb */
+
+#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+
+/* vec_insertl */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsbrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsblx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_insertl(unsigned long long __a, vector unsigned long long __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsdrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsdlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_insertl(vector unsigned char __a, vector unsigned char __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_insertl(vector unsigned short __a, vector unsigned short __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshvrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshvlx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_insertl(vector unsigned int __a, vector unsigned int __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswvrx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswvlx(__b, __c, __a);
+#endif
+}
+
+/* vec_inserth */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsblx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsbrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_inserth(unsigned long long __a, vector unsigned long long __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsdlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsdrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_inserth(vector unsigned char __a, vector unsigned char __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinsbvlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinsbvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_inserth(vector unsigned short __a, vector unsigned short __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinshvlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinshvrx(__b, __c, __a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_inserth(vector unsigned int __a, vector unsigned int __b,
+            unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vinswvlx(__b, __c, __a);
+#else
+  return __builtin_altivec_vinswvrx(__b, __c, __a);
+#endif
+}
+
+#ifdef __VSX__
+
+/* vec_permx */
+
+#define vec_permx(__a, __b, __c, __d)                                          \
+  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+
+/* vec_blendv */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_blendv(vector signed char __a, vector signed char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_blendv(vector unsigned char __a, vector unsigned char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_blendv(vector signed short __a, vector signed short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_blendv(vector unsigned short __a, vector unsigned short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_blendv(vector signed int __a, vector signed int __b,
+           vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_blendv(vector unsigned int __a, vector unsigned int __b,
+           vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_blendv(vector signed long long __a, vector signed long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_blendv(vector double __a, vector double __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+/* vec_splati */
+
+#define vec_splati(__a)                                                        \
+  _Generic((__a), signed int                                                   \
+           : ((vector signed int)__a), unsigned int                            \
+           : ((vector unsigned int)__a), float                                 \
+           : ((vector float)__a))
+
+/* vec_splatid */
+
+static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
+  return ((vector double)((double)__a));
+}
+
+/* vec_splati_ins */
+
+static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
+    vector signed int __a, const unsigned int __b, const signed int __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2 + __b] = __c;
+#endif
+  return __a;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
+    vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2 + __b] = __c;
+#endif
+  return __a;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
+#ifdef __LITTLE_ENDIAN__
+  __a[1 - __b] = __c;
+  __a[3 - __b] = __c;
+#else
+  __a[__b] = __c;
+  __a[2 + __b] = __c;
+#endif
+  return __a;
+}
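+// Worked example from the indexing above: on little-endian targets,
+// vec_splati_ins(__v, 1, __c) writes lanes 1 - 1 == 0 and 3 - 1 == 2; on
+// big-endian it writes lanes 1 and 2 + 1 == 3. __b selects which element of
+// each doubleword half receives the splatted scalar.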
+#endif /* __VSX__ */
+#endif /* __POWER10_VECTOR__ */
+
 #undef __ATTRS_o_ai
 
 #endif /* __ALTIVEC_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/ammintrin.h b/linux-x86/lib64/clang/11.0.5/include/ammintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ammintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ammintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/amxintrin.h b/linux-x86/lib64/clang/11.0.5/include/amxintrin.h
new file mode 100644
index 0000000..58254e2
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/amxintrin.h
@@ -0,0 +1,225 @@
+/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
+#endif /* __IMMINTRIN_H */
+
+#ifndef __AMXINTRIN_H
+#define __AMXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+
+/// Load tile configuration from a 64-byte memory location specified by
+/// "mem_addr". The tile configuration includes the tile type palette, the
+/// number of bytes per row, and the number of rows. If the specified
+/// palette_id is zero, it signifies the init state for both the tile
+/// config and the tile data, and the tiles are zeroed. Any invalid
+/// configuration will result in a #GP fault.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit tile configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_loadconfig(const void *__config)
+{
+  __builtin_ia32_tile_loadconfig(__config);
+}
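+// Sketch of the 64-byte configuration this expects (offsets are an
+// assumption based on Intel's AMX documentation; verify against the SDM):
+//   byte  0       palette_id (0 selects the init state)
+//   byte  1       start_row (used to restart after a fault)
+//   bytes 16..31  bytes-per-row, one uint16_t per tile
+//   bytes 48..63  rows, one uint8_t per tile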
+
+/// Stores the current tile configuration to a 64-byte memory location
+/// specified by "mem_addr". The tile configuration includes the tile type
+/// palette, the number of bytes per row, and the number of rows. If tiles
+/// are not configured, all zeroes will be stored to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
+///
+/// \param __config
+///    A pointer to the 512-bit tile configuration
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_storeconfig(void *__config)
+{
+  __builtin_ia32_tile_storeconfig(__config);
+}
+
+/// Release the tile configuration to return to the init state, which
+/// releases all storage it currently holds.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
+static __inline__ void __DEFAULT_FN_ATTRS
+_tile_release(void)
+{
+  __builtin_ia32_tilerelease();
+}
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+#define _tile_loadd(dst, base, stride) \
+  __builtin_ia32_tileloadd64((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst" using the tile configuration previously configured
+/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
+///
+/// \param dst
+///    A destination tile. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to the base address.
+/// \param stride
+///    The stride between the rows' data to be loaded in memory.
+#define _tile_stream_loadd(dst, base, stride) \
+  __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Store the tile specified by "dst" to memory specified by "base" address and
+/// "stride" using the tile configuration previously configured via
+/// "_tile_loadconfig".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
+///
+/// \param dst
+///    The source tile to be stored. Max size is 1024 Bytes.
+/// \param base
+///    A pointer to the base address.
+/// \param stride
+///    The stride between the rows' data to be stored in memory.
+#define _tile_stored(dst, base, stride) \
+  __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))
+
+/// Zero the tile specified by "tile".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
+///
+/// \param tile
+///    The destination tile to be zeroed. Max size is 1024 Bytes.
+#define _tile_zero(tile) __builtin_ia32_tilezero((tile))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbssd(dst, src0, src1) __builtin_ia32_tdpbssd((dst), (src0), (src1))
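+
+/* A hedged end-to-end sketch exercising the intrinsics above: configure
+ * palette 1, compute C += A * B over int8 data with _tile_dpbssd, then
+ * store the int32 accumulators. The config byte offsets follow the Intel
+ * SDM; the tile numbers, 16x64x16 shape, and strides are illustrative
+ * assumptions. Guarded out so it is never compiled as part of the header.
+ */
+#ifdef __AMX_USAGE_SKETCH /* never defined; illustration only */
+static void __amx_sketch_i8_matmul(const void *a, const void *b, void *c) {
+  __attribute__((aligned(64))) unsigned char cfg[64] = {0};
+  cfg[0] = 1;                    /* palette 1 */
+  cfg[16] = 64; cfg[48] = 16;    /* tile 0 (C, int32): 16 rows x 64 bytes */
+  cfg[18] = 64; cfg[49] = 16;    /* tile 1 (A, int8):  16 rows x 64 bytes */
+  cfg[20] = 64; cfg[50] = 16;    /* tile 2 (B, int8):  16 rows x 64 bytes */
+  _tile_loadconfig(cfg);
+  _tile_zero(0);                 /* clear the accumulator tile */
+  _tile_loadd(1, a, 64);         /* load A */
+  _tile_loadd(2, b, 64);         /* load B (pre-interleaved in groups of 4) */
+  _tile_dpbssd(0, 1, 2);         /* C += 4-way int8 dot products */
+  _tile_stored(0, c, 64);        /* write back int32 results */
+  _tile_release();               /* return tiles to the init state */
+}
+#endif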
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbsud(dst, src0, src1) __builtin_ia32_tdpbsud((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbusd(dst, src0, src1) __builtin_ia32_tdpbusd((dst), (src0), (src1))
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbuud(dst, src0, src1) __builtin_ia32_tdpbuud((dst), (src0), (src1))
+
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
+///
+/// \param dst
+///    The destination tile. Max size is 1024 Bytes.
+/// \param src0
+///    The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+///    The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpbf16ps(dst, src0, src1) \
+  __builtin_ia32_tdpbf16ps((dst), (src0), (src1))
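+
+/* Usage sketch (tile numbers illustrative): with tiles configured so each
+ * 32-bit source element holds a pair of BF16 values, one accumulation
+ * step of an fp32 matmul is:
+ *   _tile_dpbf16ps(0, 1, 2);   // fp32 tile 0 += bf16 tile 1 . bf16 tile 2
+ */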
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __x86_64__ */
+#endif /* __AMXINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/arm64intr.h b/linux-x86/lib64/clang/11.0.5/include/arm64intr.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/arm64intr.h
rename to linux-x86/lib64/clang/11.0.5/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_acle.h b/linux-x86/lib64/clang/11.0.5/include/arm_acle.h
similarity index 98%
copy from darwin-x86/lib64/clang/11.0.1/include/arm_acle.h
copy to linux-x86/lib64/clang/11.0.5/include/arm_acle.h
index 596ea03..de568b4 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/arm_acle.h
+++ b/linux-x86/lib64/clang/11.0.5/include/arm_acle.h
@@ -22,31 +22,43 @@
 
 /* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
 /* 8.3 Memory barriers */
-#if !defined(_MSC_VER)
+#if !__has_builtin(__dmb)
 #define __dmb(i) __builtin_arm_dmb(i)
+#endif
+#if !__has_builtin(__dsb)
 #define __dsb(i) __builtin_arm_dsb(i)
+#endif
+#if !__has_builtin(__isb)
 #define __isb(i) __builtin_arm_isb(i)
 #endif
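+
+/* Usage sketch, assuming an Arm target where these builtins are enabled:
+ *   __dmb(15);   // DMB SY: full-system data memory barrier
+ *   __dsb(15);   // DSB SY: full-system data synchronization barrier
+ *   __isb(15);   // ISB SY: flush the pipeline
+ */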
 
 /* 8.4 Hints */
 
-#if !defined(_MSC_VER)
+#if !__has_builtin(__wfi)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
   __builtin_arm_wfi();
 }
+#endif
 
+#if !__has_builtin(__wfe)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
   __builtin_arm_wfe();
 }
+#endif
 
+#if !__has_builtin(__sev)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
   __builtin_arm_sev();
 }
+#endif
 
+#if !__has_builtin(__sevl)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
   __builtin_arm_sevl();
 }
+#endif
 
+#if !__has_builtin(__yield)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
   __builtin_arm_yield();
 }
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_bf16.h b/linux-x86/lib64/clang/11.0.5/include/arm_bf16.h
new file mode 100644
index 0000000..329ae39
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/arm_bf16.h
@@ -0,0 +1,20 @@
+/*===---- arm_bf16.h - ARM BF16 intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_BF16_H
+#define __ARM_BF16_H
+
+typedef __bf16 bfloat16_t;
+#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
+
+
+#undef __ai
+
+#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_cde.h b/linux-x86/lib64/clang/11.0.5/include/arm_cde.h
new file mode 100644
index 0000000..4ad5d82
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/arm_cde.h
@@ -0,0 +1,410 @@
+/*===---- arm_cde.h - ARM CDE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_CDE_H
+#define __ARM_CDE_H
+
+#if !__ARM_FEATURE_CDE
+#error "CDE support not enabled"
+#endif
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1)))
+uint32_t __arm_cx1(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1a)))
+uint32_t __arm_cx1a(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1d)))
+uint64_t __arm_cx1d(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1da)))
+uint64_t __arm_cx1da(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2)))
+uint32_t __arm_cx2(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2a)))
+uint32_t __arm_cx2a(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2d)))
+uint64_t __arm_cx2d(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2da)))
+uint64_t __arm_cx2da(int, uint64_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3)))
+uint32_t __arm_cx3(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3a)))
+uint32_t __arm_cx3a(int, uint32_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3d)))
+uint64_t __arm_cx3d(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3da)))
+uint64_t __arm_cx3da(int, uint64_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1_u32)))
+uint32_t __arm_vcx1_u32(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1a_u32)))
+uint32_t __arm_vcx1a_u32(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1d_u64)))
+uint64_t __arm_vcx1d_u64(int, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1da_u64)))
+uint64_t __arm_vcx1da_u64(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2_u32)))
+uint32_t __arm_vcx2_u32(int, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2a_u32)))
+uint32_t __arm_vcx2a_u32(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2d_u64)))
+uint64_t __arm_vcx2d_u64(int, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2da_u64)))
+uint64_t __arm_vcx2da_u64(int, uint64_t, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3_u32)))
+uint32_t __arm_vcx3_u32(int, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3a_u32)))
+uint32_t __arm_vcx3a_u32(int, uint32_t, uint32_t, uint32_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3d_u64)))
+uint64_t __arm_vcx3d_u64(int, uint64_t, uint64_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3da_u64)))
+uint64_t __arm_vcx3da_u64(int, uint64_t, uint64_t, uint64_t, uint32_t);
+
+#if __ARM_FEATURE_MVE
+
+typedef uint16_t mve_pred16_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;
+
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s16)))
+int16x8_t __arm_vcx1q_m(int, int16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s32)))
+int32x4_t __arm_vcx1q_m(int, int32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s64)))
+int64x2_t __arm_vcx1q_m(int, int64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s8)))
+int8x16_t __arm_vcx1q_m(int, int8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u16)))
+uint16x8_t __arm_vcx1q_m(int, uint16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u32)))
+uint32x4_t __arm_vcx1q_m(int, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u64)))
+uint64x2_t __arm_vcx1q_m(int, uint64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u8)))
+uint8x16_t __arm_vcx1q_m(int, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_u8)))
+uint8x16_t __arm_vcx1q_u8(int, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s16)))
+int16x8_t __arm_vcx1qa_m(int, int16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s32)))
+int32x4_t __arm_vcx1qa_m(int, int32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s64)))
+int64x2_t __arm_vcx1qa_m(int, int64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s8)))
+int8x16_t __arm_vcx1qa_m(int, int8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u16)))
+uint16x8_t __arm_vcx1qa_m(int, uint16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u32)))
+uint32x4_t __arm_vcx1qa_m(int, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u64)))
+uint64x2_t __arm_vcx1qa_m(int, uint64x2_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u8)))
+uint8x16_t __arm_vcx1qa_m(int, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s16)))
+int16x8_t __arm_vcx1qa(int, int16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s32)))
+int32x4_t __arm_vcx1qa(int, int32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s64)))
+int64x2_t __arm_vcx1qa(int, int64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s8)))
+int8x16_t __arm_vcx1qa(int, int8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u16)))
+uint16x8_t __arm_vcx1qa(int, uint16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u32)))
+uint32x4_t __arm_vcx1qa(int, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u64)))
+uint64x2_t __arm_vcx1qa(int, uint64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u8)))
+uint8x16_t __arm_vcx1qa(int, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s16)))
+int16x8_t __arm_vcx2q_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s32)))
+int32x4_t __arm_vcx2q_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s64)))
+int64x2_t __arm_vcx2q_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s8)))
+int8x16_t __arm_vcx2q_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u16)))
+uint16x8_t __arm_vcx2q_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u32)))
+uint32x4_t __arm_vcx2q_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u64)))
+uint64x2_t __arm_vcx2q_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u8)))
+uint8x16_t __arm_vcx2q_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s16)))
+int16x8_t __arm_vcx2q(int, int16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s32)))
+int32x4_t __arm_vcx2q(int, int32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s64)))
+int64x2_t __arm_vcx2q(int, int64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s8)))
+int8x16_t __arm_vcx2q(int, int8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u16)))
+uint16x8_t __arm_vcx2q(int, uint16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u32)))
+uint32x4_t __arm_vcx2q(int, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u64)))
+uint64x2_t __arm_vcx2q(int, uint64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8)))
+uint8x16_t __arm_vcx2q(int, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s16)))
+uint8x16_t __arm_vcx2q_u8(int, int16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s32)))
+uint8x16_t __arm_vcx2q_u8(int, int32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s64)))
+uint8x16_t __arm_vcx2q_u8(int, int64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s8)))
+uint8x16_t __arm_vcx2q_u8(int, int8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u16)))
+uint8x16_t __arm_vcx2q_u8(int, uint16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u32)))
+uint8x16_t __arm_vcx2q_u8(int, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u64)))
+uint8x16_t __arm_vcx2q_u8(int, uint64x2_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u8)))
+uint8x16_t __arm_vcx2q_u8(int, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s16)))
+int16x8_t __arm_vcx2qa_impl(int, int16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s32)))
+int32x4_t __arm_vcx2qa_impl(int, int32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s64)))
+int64x2_t __arm_vcx2qa_impl(int, int64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s8)))
+int8x16_t __arm_vcx2qa_impl(int, int8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u16)))
+uint16x8_t __arm_vcx2qa_impl(int, uint16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u32)))
+uint32x4_t __arm_vcx2qa_impl(int, uint32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u64)))
+uint64x2_t __arm_vcx2qa_impl(int, uint64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u8)))
+uint8x16_t __arm_vcx2qa_impl(int, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s16)))
+int16x8_t __arm_vcx2qa_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s32)))
+int32x4_t __arm_vcx2qa_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s64)))
+int64x2_t __arm_vcx2qa_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s8)))
+int8x16_t __arm_vcx2qa_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u16)))
+uint16x8_t __arm_vcx2qa_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u32)))
+uint32x4_t __arm_vcx2qa_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u64)))
+uint64x2_t __arm_vcx2qa_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u8)))
+uint8x16_t __arm_vcx2qa_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s16)))
+int16x8_t __arm_vcx3q_impl(int, int16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s32)))
+int32x4_t __arm_vcx3q_impl(int, int32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s64)))
+int64x2_t __arm_vcx3q_impl(int, int64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s8)))
+int8x16_t __arm_vcx3q_impl(int, int8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u16)))
+uint16x8_t __arm_vcx3q_impl(int, uint16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u32)))
+uint32x4_t __arm_vcx3q_impl(int, uint32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u64)))
+uint64x2_t __arm_vcx3q_impl(int, uint64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u8)))
+uint8x16_t __arm_vcx3q_impl(int, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s16)))
+int16x8_t __arm_vcx3q_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s32)))
+int32x4_t __arm_vcx3q_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s64)))
+int64x2_t __arm_vcx3q_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s8)))
+int8x16_t __arm_vcx3q_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u16)))
+uint16x8_t __arm_vcx3q_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u32)))
+uint32x4_t __arm_vcx3q_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u64)))
+uint64x2_t __arm_vcx3q_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u8)))
+uint8x16_t __arm_vcx3q_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s16)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s32)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s64)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s8)))
+uint8x16_t __arm_vcx3q_u8_impl(int, int8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u16)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u32)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u64)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint64x2_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u8)))
+uint8x16_t __arm_vcx3q_u8_impl(int, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s16)))
+int16x8_t __arm_vcx3qa_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s32)))
+int32x4_t __arm_vcx3qa_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s64)))
+int64x2_t __arm_vcx3qa_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s8)))
+int8x16_t __arm_vcx3qa_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u16)))
+uint16x8_t __arm_vcx3qa_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u32)))
+uint32x4_t __arm_vcx3qa_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u64)))
+uint64x2_t __arm_vcx3qa_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u8)))
+uint8x16_t __arm_vcx3qa_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s16)))
+int16x8_t __arm_vcx3qa_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s32)))
+int32x4_t __arm_vcx3qa_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s64)))
+int64x2_t __arm_vcx3qa_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s8)))
+int8x16_t __arm_vcx3qa_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u16)))
+uint16x8_t __arm_vcx3qa_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u32)))
+uint32x4_t __arm_vcx3qa_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u64)))
+uint64x2_t __arm_vcx3qa_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u8)))
+uint8x16_t __arm_vcx3qa_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vreinterpretq_u8_u8)))
+uint8x16_t __arm_vreinterpretq_u8(uint8x16_t);
+#define __arm_vcx2q_m(cp, inactive, n, imm, pred) __arm_vcx2q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), (imm), (pred))
+#define __arm_vcx2qa(cp, acc, n, imm) __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm))
+#define __arm_vcx2qa_m(cp, acc, n, imm, pred) __arm_vcx2qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm), (pred))
+#define __arm_vcx3q(cp, n, m, imm) __arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))
+#define __arm_vcx3q_m(cp, inactive, n, m, imm, pred) __arm_vcx3q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))
+#define __arm_vcx3q_u8(cp, n, m, imm) __arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))
+#define __arm_vcx3qa(cp, acc, n, m, imm) __arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm))
+#define __arm_vcx3qa_m(cp, acc, n, m, imm, pred) __arm_vcx3qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))
+
+#endif /* __ARM_FEATURE_MVE */
+
+#if __ARM_FEATURE_MVE & 2
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;
+
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f16)))
+float16x8_t __arm_vcx1q_m(int, float16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f32)))
+float32x4_t __arm_vcx1q_m(int, float32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f16)))
+float16x8_t __arm_vcx1qa(int, float16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f32)))
+float32x4_t __arm_vcx1qa(int, float32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f16)))
+float16x8_t __arm_vcx1qa_m(int, float16x8_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f32)))
+float32x4_t __arm_vcx1qa_m(int, float32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f16)))
+float16x8_t __arm_vcx2q(int, float16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f32)))
+float32x4_t __arm_vcx2q(int, float32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f16)))
+float16x8_t __arm_vcx2q_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f32)))
+float32x4_t __arm_vcx2q_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f16)))
+uint8x16_t __arm_vcx2q_u8(int, float16x8_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f32)))
+uint8x16_t __arm_vcx2q_u8(int, float32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f16)))
+float16x8_t __arm_vcx2qa_impl(int, float16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f32)))
+float32x4_t __arm_vcx2qa_impl(int, float32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f16)))
+float16x8_t __arm_vcx2qa_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f32)))
+float32x4_t __arm_vcx2qa_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f16)))
+float16x8_t __arm_vcx3q_impl(int, float16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f32)))
+float32x4_t __arm_vcx3q_impl(int, float32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f16)))
+float16x8_t __arm_vcx3q_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f32)))
+float32x4_t __arm_vcx3q_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f16)))
+uint8x16_t __arm_vcx3q_u8_impl(int, float16x8_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f32)))
+uint8x16_t __arm_vcx3q_u8_impl(int, float32x4_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f16)))
+float16x8_t __arm_vcx3qa_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f32)))
+float32x4_t __arm_vcx3qa_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f16)))
+float16x8_t __arm_vcx3qa_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f32)))
+float32x4_t __arm_vcx3qa_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
+
+#endif /* __ARM_FEATURE_MVE & 2 */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __ARM_CDE_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/arm_cmse.h b/linux-x86/lib64/clang/11.0.5/include/arm_cmse.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/arm_cmse.h
rename to linux-x86/lib64/clang/11.0.5/include/arm_cmse.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/arm_fp16.h b/linux-x86/lib64/clang/11.0.5/include/arm_fp16.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/arm_fp16.h
rename to linux-x86/lib64/clang/11.0.5/include/arm_fp16.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_mve.h b/linux-x86/lib64/clang/11.0.5/include/arm_mve.h
new file mode 100644
index 0000000..4da41dc
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/arm_mve.h
@@ -0,0 +1,19187 @@
+/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_MVE_H
+#define __ARM_MVE_H
+
+#if !__ARM_FEATURE_MVE
+#error "MVE support not enabled"
+#endif
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t mve_pred16_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
+typedef struct { int16x8_t val[2]; } int16x8x2_t;
+typedef struct { int16x8_t val[4]; } int16x8x4_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
+typedef struct { int32x4_t val[2]; } int32x4x2_t;
+typedef struct { int32x4_t val[4]; } int32x4x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
+typedef struct { int64x2_t val[2]; } int64x2x2_t;
+typedef struct { int64x2_t val[4]; } int64x2x4_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
+typedef struct { int8x16_t val[2]; } int8x16x2_t;
+typedef struct { int8x16_t val[4]; } int8x16x4_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
+typedef struct { uint16x8_t val[2]; } uint16x8x2_t;
+typedef struct { uint16x8_t val[4]; } uint16x8x4_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
+typedef struct { uint32x4_t val[2]; } uint32x4x2_t;
+typedef struct { uint32x4_t val[4]; } uint32x4x4_t;
+typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
+typedef struct { uint64x2_t val[2]; } uint64x2x2_t;
+typedef struct { uint64x2_t val[4]; } uint64x2x4_t;
+typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;
+typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
+typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
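+
+/* The *x2_t/*x4_t structs wrap two or four vector registers so that the
+ * de-interleaving load/store families (vld2q/vld4q, vst2q/vst4q, declared
+ * further down this header) can pass them as a unit, e.g.:
+ *   int16x8x2_t pair = __arm_vld2q(ptr);  // pair.val[0], pair.val[1]
+ */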
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl)))
+int64_t __arm_asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll)))
+uint64_t __arm_lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr)))
+int32_t __arm_sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl)))
+int64_t __arm_sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t __arm_sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl)))
+int32_t __arm_sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll)))
+int64_t __arm_sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr)))
+int32_t __arm_srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl)))
+int64_t __arm_srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl)))
+uint32_t __arm_uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll)))
+uint64_t __arm_uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl)))
+uint32_t __arm_uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll)))
+uint64_t __arm_uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr)))
+uint32_t __arm_urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl)))
+uint64_t __arm_urshrl(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t);
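+
+/* Sketch of the declaration pattern used throughout this header: each
+ * intrinsic gets an explicitly suffixed name plus a polymorphic overload,
+ * both aliased to the same builtin, so these two calls are equivalent:
+ *   uint32_t a = __arm_vabavq_u8(acc, v1, v2);
+ *   uint32_t b = __arm_vabavq(acc, v1, v2);  // resolves on operand types
+ */
+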
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t __arm_vabdq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t __arm_vabdq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t __arm_vabdq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t __arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t __arm_vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t __arm_vabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t __arm_vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t __arm_vabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t __arm_vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t __arm_vabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t __arm_vabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t __arm_vabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t __arm_vabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t __arm_vabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t __arm_vabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t __arm_vabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t __arm_vabsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t __arm_vabsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t __arm_vabsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t __arm_vabsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t __arm_vabsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t __arm_vabsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t __arm_vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t __arm_vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t __arm_vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t __arm_vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t __arm_vaddlvaq_s32(int64_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t __arm_vaddlvaq(int64_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t __arm_vaddlvaq_u32(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t __arm_vaddlvaq(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t __arm_vaddlvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t __arm_vaddlvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t __arm_vaddlvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t __arm_vaddlvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t __arm_vaddlvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t __arm_vaddlvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t __arm_vaddlvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t __arm_vaddlvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t __arm_vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t __arm_vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t __arm_vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t __arm_vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t __arm_vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t __arm_vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t __arm_vaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t __arm_vaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t __arm_vaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t __arm_vaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t __arm_vaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t __arm_vaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t __arm_vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t __arm_vaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t __arm_vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t __arm_vaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t __arm_vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t __arm_vaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t __arm_vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t __arm_vaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t __arm_vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t __arm_vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t __arm_vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t __arm_vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t __arm_vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t __arm_vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t __arm_vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t __arm_vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t __arm_vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t __arm_vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t __arm_vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t __arm_vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t __arm_vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t __arm_vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t __arm_vaddvaq_s16(int32_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t __arm_vaddvaq(int32_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t __arm_vaddvaq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t __arm_vaddvaq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t __arm_vaddvaq_s8(int32_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t __arm_vaddvaq(int32_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t __arm_vaddvaq_u16(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t __arm_vaddvaq(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t __arm_vaddvaq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t __arm_vaddvaq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t __arm_vaddvaq_u8(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t __arm_vaddvaq(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t __arm_vaddvq_p_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t __arm_vaddvq_p(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t __arm_vaddvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t __arm_vaddvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t __arm_vaddvq_p_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t __arm_vaddvq_p(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t __arm_vaddvq_p_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t __arm_vaddvq_p(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t __arm_vaddvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t __arm_vaddvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t __arm_vaddvq_p_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t __arm_vaddvq_p(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t __arm_vaddvq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t __arm_vaddvq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t __arm_vaddvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t __arm_vaddvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t __arm_vaddvq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t __arm_vaddvq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t __arm_vaddvq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t __arm_vaddvq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t __arm_vaddvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t __arm_vaddvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t __arm_vaddvq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t __arm_vaddvq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t __arm_vandq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t __arm_vandq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t __arm_vandq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t __arm_vandq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t __arm_vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t __arm_vbicq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t __arm_vbicq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t __arm_vbicq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t __arm_vbicq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t __arm_vbicq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t __arm_vbicq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t __arm_vbicq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t __arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t __arm_vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t __arm_vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t __arm_vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t __arm_vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t __arm_vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t __arm_vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t __arm_vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t __arm_vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t __arm_vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t __arm_vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t __arm_vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t __arm_vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t __arm_vbrsrq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t __arm_vbrsrq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t __arm_vbrsrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t __arm_vbrsrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t __arm_vbrsrq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t __arm_vbrsrq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t __arm_vbrsrq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t __arm_vbrsrq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t __arm_vbrsrq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t __arm_vbrsrq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t __arm_vbrsrq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t __arm_vbrsrq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t __arm_vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t __arm_vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t __arm_vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t __arm_vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t __arm_vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t __arm_vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t __arm_vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t __arm_vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t __arm_vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t __arm_vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t __arm_vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t __arm_vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t __arm_vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t __arm_vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t __arm_vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t __arm_vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t __arm_vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t __arm_vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t __arm_vclsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t __arm_vclsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t __arm_vclsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t __arm_vclsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t __arm_vclsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t __arm_vclsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t __arm_vclsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t __arm_vclsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t __arm_vclsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t __arm_vclsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t __arm_vclsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t __arm_vclsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t __arm_vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t __arm_vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t __arm_vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t __arm_vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t __arm_vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t __arm_vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t __arm_vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t __arm_vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t __arm_vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t __arm_vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t __arm_vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t __arm_vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t __arm_vclzq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t __arm_vclzq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t __arm_vclzq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t __arm_vclzq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t __arm_vclzq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t __arm_vclzq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t __arm_vclzq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t __arm_vclzq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t __arm_vclzq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t __arm_vclzq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t __arm_vclzq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t __arm_vclzq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t __arm_vclzq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t __arm_vclzq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t __arm_vclzq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t __arm_vclzq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t __arm_vclzq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t __arm_vclzq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t __arm_vclzq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t __arm_vclzq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t __arm_vclzq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t __arm_vclzq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t __arm_vclzq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t __arm_vclzq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))
+mve_pred16_t __arm_vctp16q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))
+mve_pred16_t __arm_vctp16q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))
+mve_pred16_t __arm_vctp32q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))
+mve_pred16_t __arm_vctp32q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))
+mve_pred16_t __arm_vctp64q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))
+mve_pred16_t __arm_vctp64q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))
+mve_pred16_t __arm_vctp8q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))
+mve_pred16_t __arm_vctp8q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t __arm_vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t __arm_vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t __arm_vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t __arm_vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t __arm_vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t __arm_vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t __arm_vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t __arm_vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t __arm_vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t __arm_vddupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t __arm_vddupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t __arm_vddupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t __arm_vddupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t __arm_vddupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t __arm_vddupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t __arm_vddupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t __arm_vddupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t __arm_vddupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t __arm_vddupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t __arm_vddupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t __arm_vddupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t __arm_vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t __arm_vddupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t __arm_vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t __arm_vddupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t __arm_vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t __arm_vddupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t __arm_vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t __arm_vddupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t __arm_vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t __arm_vddupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t __arm_vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t __arm_vddupq_x_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t __arm_vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t __arm_vdupq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t __arm_vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t __arm_vdupq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t __arm_vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t __arm_vdupq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t __arm_vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t __arm_vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t __arm_vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t __arm_vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t __arm_vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t __arm_vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))
+int16x8_t __arm_vdupq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))
+int32x4_t __arm_vdupq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))
+int8x16_t __arm_vdupq_n_s8(int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))
+uint16x8_t __arm_vdupq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))
+uint32x4_t __arm_vdupq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))
+uint8x16_t __arm_vdupq_n_u8(uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))
+int16x8_t __arm_vdupq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))
+int32x4_t __arm_vdupq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))
+int8x16_t __arm_vdupq_x_n_s8(int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))
+uint16x8_t __arm_vdupq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))
+uint32x4_t __arm_vdupq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))
+uint8x16_t __arm_vdupq_x_n_u8(uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t __arm_vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t __arm_vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t __arm_vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t __arm_vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t __arm_vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t __arm_vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t __arm_vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t __arm_vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t __arm_vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t __arm_vdwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t __arm_vdwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t __arm_vdwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t __arm_vdwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t __arm_vdwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t __arm_vdwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t __arm_vdwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t __arm_vdwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t __arm_vdwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t __arm_vdwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t __arm_vdwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t __arm_vdwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t __arm_vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t __arm_vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t __arm_vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t __arm_vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t __arm_vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t __arm_vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t __arm_vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t __arm_vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t __arm_vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t __arm_vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t __arm_vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t __arm_vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t __arm_veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t __arm_veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t __arm_veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t __arm_veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t __arm_veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t __arm_veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t __arm_veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t __arm_veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t __arm_veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t __arm_veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t __arm_veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t __arm_veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t __arm_veorq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t __arm_veorq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t __arm_veorq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t __arm_veorq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t __arm_veorq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t __arm_veorq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t __arm_veorq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t __arm_veorq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t __arm_veorq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t __arm_veorq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t __arm_veorq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t __arm_veorq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t __arm_veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t __arm_veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t __arm_veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t __arm_veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t __arm_veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t __arm_veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t __arm_veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t __arm_veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t __arm_veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t __arm_veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t __arm_veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t __arm_veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t __arm_vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t __arm_vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t __arm_vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t __arm_vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t __arm_vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t __arm_vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t __arm_vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t __arm_vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t __arm_vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t __arm_vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t __arm_vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t __arm_vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t __arm_vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t __arm_vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t __arm_vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t __arm_vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t __arm_vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t __arm_vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t __arm_vhaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t __arm_vhaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t __arm_vhaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t __arm_vhaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t __arm_vhaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t __arm_vhaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t __arm_vhaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t __arm_vhaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t __arm_vhaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t __arm_vhaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t __arm_vhaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t __arm_vhaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t __arm_vhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t __arm_vhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t __arm_vhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t __arm_vhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t __arm_vhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t __arm_vhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t __arm_vhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t __arm_vhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t __arm_vhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t __arm_vhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t __arm_vhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t __arm_vhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t __arm_vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t __arm_vhaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t __arm_vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t __arm_vhaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t __arm_vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t __arm_vhaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t __arm_vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t __arm_vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t __arm_vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t __arm_vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t __arm_vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t __arm_vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t __arm_vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t __arm_vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t __arm_vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t __arm_vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t __arm_vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t __arm_vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t __arm_vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t __arm_vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t __arm_vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t __arm_vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t __arm_vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t __arm_vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t __arm_vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t __arm_vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t __arm_vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t __arm_vhcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t __arm_vhcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t __arm_vhcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t __arm_vhcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t __arm_vhcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t __arm_vhcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t __arm_vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t __arm_vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t __arm_vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t __arm_vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t __arm_vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t __arm_vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t __arm_vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t __arm_vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t __arm_vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t __arm_vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t __arm_vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t __arm_vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t __arm_vhcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t __arm_vhcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t __arm_vhcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t __arm_vhcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t __arm_vhcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t __arm_vhcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t __arm_vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t __arm_vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t __arm_vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t __arm_vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t __arm_vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t __arm_vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t __arm_vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t __arm_vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t __arm_vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t __arm_vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t __arm_vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t __arm_vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t __arm_vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t __arm_vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t __arm_vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t __arm_vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t __arm_vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t __arm_vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t __arm_vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t __arm_vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t __arm_vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t __arm_vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t __arm_vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t __arm_vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t __arm_vhsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t __arm_vhsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t __arm_vhsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t __arm_vhsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t __arm_vhsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t __arm_vhsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t __arm_vhsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t __arm_vhsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t __arm_vhsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t __arm_vhsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t __arm_vhsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t __arm_vhsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t __arm_vhsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t __arm_vhsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t __arm_vhsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t __arm_vhsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t __arm_vhsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t __arm_vhsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t __arm_vhsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t __arm_vhsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t __arm_vhsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t __arm_vhsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t __arm_vhsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t __arm_vhsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t __arm_vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t __arm_vhsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t __arm_vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t __arm_vhsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t __arm_vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t __arm_vhsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t __arm_vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t __arm_vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t __arm_vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t __arm_vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t __arm_vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t __arm_vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t __arm_vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t __arm_vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t __arm_vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t __arm_vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t __arm_vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t __arm_vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t __arm_vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t __arm_vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t __arm_vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t __arm_vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t __arm_vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t __arm_vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t __arm_vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t __arm_vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t __arm_vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t __arm_vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t __arm_vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t __arm_vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t __arm_vidupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t __arm_vidupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t __arm_vidupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t __arm_vidupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t __arm_vidupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t __arm_vidupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t __arm_vidupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t __arm_vidupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t __arm_vidupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t __arm_vidupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t __arm_vidupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t __arm_vidupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t __arm_vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t __arm_vidupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t __arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t __arm_viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t __arm_vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t __arm_vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t __arm_vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t __arm_vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t __arm_vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t __arm_vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t __arm_vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t __arm_vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t __arm_vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t __arm_vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t __arm_vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t __arm_vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t __arm_vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t __arm_vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t __arm_vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t __arm_vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t __arm_vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t __arm_vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t __arm_vmaxavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t __arm_vmaxavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t __arm_vmaxavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t __arm_vmaxavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t __arm_vmaxavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t __arm_vmaxavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t __arm_vmaxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t __arm_vmaxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t __arm_vmaxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t __arm_vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t __arm_vmaxvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t __arm_vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t __arm_vmaxvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t __arm_vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t __arm_vmaxvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t __arm_vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t __arm_vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t __arm_vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t __arm_vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t __arm_vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t __arm_vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t __arm_vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t __arm_vminavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t __arm_vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t __arm_vminavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t __arm_vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t __arm_vminavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t __arm_vminavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t __arm_vminavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t __arm_vminavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t __arm_vminavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t __arm_vminavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t __arm_vminavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t __arm_vminq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t __arm_vminq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t __arm_vminq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t __arm_vminq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t __arm_vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t __arm_vminvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t __arm_vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t __arm_vminvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t __arm_vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t __arm_vminvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t __arm_vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t __arm_vminvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t __arm_vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t __arm_vminvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t __arm_vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t __arm_vminvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t __arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t __arm_vmladavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t __arm_vmladavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t __arm_vmladavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t __arm_vmladavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t __arm_vmladavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t __arm_vmladavxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t __arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t __arm_vmlaldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t __arm_vmlaldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t __arm_vmlaldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t __arm_vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t __arm_vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t __arm_vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t __arm_vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t __arm_vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t __arm_vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t __arm_vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t __arm_vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t __arm_vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t __arm_vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t __arm_vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t __arm_vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t __arm_vmlaq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t __arm_vmlaq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t __arm_vmlaq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t __arm_vmlaq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t __arm_vmlaq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t __arm_vmlaq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t __arm_vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t __arm_vmlaq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t __arm_vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t __arm_vmlaq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t __arm_vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t __arm_vmlaq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t __arm_vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t __arm_vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t __arm_vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t __arm_vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t __arm_vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t __arm_vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t __arm_vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t __arm_vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t __arm_vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t __arm_vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t __arm_vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t __arm_vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t __arm_vmlasq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t __arm_vmlasq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t __arm_vmlasq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t __arm_vmlasq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t __arm_vmlasq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t __arm_vmlasq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t __arm_vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t __arm_vmlasq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t __arm_vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t __arm_vmlasq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t __arm_vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t __arm_vmlasq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t __arm_vmlsdavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t __arm_vmlsdavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t __arm_vmlsdavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t __arm_vmlsdavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t __arm_vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t __arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t __arm_vmlsldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t __arm_vmlsldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t __arm_vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t __arm_vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t __arm_vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t __arm_vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t __arm_vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t __arm_vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t __arm_vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t __arm_vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t __arm_vmovlbq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t __arm_vmovlbq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t __arm_vmovlbq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t __arm_vmovlbq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t __arm_vmovlbq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t __arm_vmovlbq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t __arm_vmovlbq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t __arm_vmovlbq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t __arm_vmovlbq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t __arm_vmovlbq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t __arm_vmovlbq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t __arm_vmovlbq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t __arm_vmovlbq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t __arm_vmovlbq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t __arm_vmovlbq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t __arm_vmovlbq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t __arm_vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t __arm_vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t __arm_vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t __arm_vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t __arm_vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t __arm_vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t __arm_vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t __arm_vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t __arm_vmovltq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t __arm_vmovltq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t __arm_vmovltq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t __arm_vmovltq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t __arm_vmovltq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t __arm_vmovltq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t __arm_vmovltq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t __arm_vmovltq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t __arm_vmovltq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t __arm_vmovltq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t __arm_vmovltq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t __arm_vmovltq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t __arm_vmovltq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t __arm_vmovltq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t __arm_vmovltq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t __arm_vmovltq_x(uint8x16_t, mve_pred16_t);
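+/* Editor's sketch, not part of the generated header: the vmovl[bt]q family
+ * widens a vector's even-numbered (bottom) or odd-numbered (top) lanes to
+ * the next element size. A minimal illustration, assuming an MVE-enabled
+ * target (-march=armv8.1-m.main+mve) with this header in effect:
+ */
+#if 0
+void widen_s8(int8x16_t v, int16x8_t *even, int16x8_t *odd) {
+  *even = __arm_vmovlbq(v); /* sign-extends the even-numbered lanes */
+  *odd  = __arm_vmovltq(v); /* sign-extends the odd-numbered lanes */
+}
+#endif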
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t __arm_vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t __arm_vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t __arm_vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t __arm_vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t __arm_vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t __arm_vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t __arm_vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t __arm_vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t __arm_vmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t __arm_vmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t __arm_vmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t __arm_vmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t __arm_vmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t __arm_vmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t __arm_vmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t __arm_vmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t __arm_vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t __arm_vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t __arm_vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t __arm_vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t __arm_vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t __arm_vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t __arm_vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t __arm_vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t __arm_vmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t __arm_vmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t __arm_vmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t __arm_vmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t __arm_vmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t __arm_vmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t __arm_vmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t __arm_vmovntq(uint16x8_t, uint32x4_t);
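+/* Editor's sketch, not part of the generated header: vmovn[bt]q narrow a
+ * wide vector into the even- or odd-numbered lanes of an existing
+ * destination, so a pair of calls interleaves two results. Assumes an
+ * MVE-enabled target:
+ */
+#if 0
+int8x16_t narrow_interleave(int8x16_t d, int16x8_t even_src, int16x8_t odd_src) {
+  d = __arm_vmovnbq(d, even_src); /* truncated results land in even lanes */
+  d = __arm_vmovntq(d, odd_src);  /* truncated results land in odd lanes */
+  return d;
+}
+#endif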
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t __arm_vmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t __arm_vmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t __arm_vmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t __arm_vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
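+/* Editor's sketch, not part of the generated header: vmulhq keeps only the
+ * most-significant half of each element-wise product, which is useful for
+ * fixed-point scaling without widening. Assumes an MVE-enabled target:
+ */
+#if 0
+uint16x8_t mul_high_u16(uint16x8_t a, uint16x8_t b) {
+  return __arm_vmulhq(a, b); /* per lane: ((uint32_t)a * b) >> 16 */
+}
+#endif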
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t __arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t __arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t __arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
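+/* Editor's sketch, not part of the generated header: vmull[bt]q_int form
+ * full-width products of the even- or odd-numbered lanes, doubling the
+ * element size; the _poly variants are carry-less (polynomial) multiplies.
+ * Assumes an MVE-enabled target:
+ */
+#if 0
+void mull_s16(int16x8_t a, int16x8_t b, int32x4_t *even, int32x4_t *odd) {
+  *even = __arm_vmullbq_int(a, b); /* 32-bit products of even lanes */
+  *odd  = __arm_vmulltq_int(a, b); /* 32-bit products of odd lanes */
+}
+#endif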
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t __arm_vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t __arm_vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t __arm_vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t __arm_vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t __arm_vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t __arm_vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t __arm_vmulq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t __arm_vmulq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t __arm_vmulq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t __arm_vmulq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t __arm_vmulq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t __arm_vmulq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t __arm_vmulq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t __arm_vmulq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t __arm_vmulq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t __arm_vmulq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t __arm_vmulq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t __arm_vmulq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t __arm_vmulq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t __arm_vmulq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t __arm_vmulq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t __arm_vmulq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t __arm_vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t __arm_vmulq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t __arm_vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t __arm_vmulq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t __arm_vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t __arm_vmulq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t __arm_vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t __arm_vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t __arm_vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t __arm_vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t __arm_vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t __arm_vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
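+/* Editor's sketch, not part of the generated header: the _m ("merging")
+ * predicated forms take inactive lanes from their first argument, while the
+ * _x ("don't-care") forms leave inactive lanes undefined. Assumes an
+ * MVE-enabled target:
+ */
+#if 0
+uint16x8_t scale_merging(uint16x8_t inactive, uint16x8_t v, uint16_t s,
+                         mve_pred16_t p) {
+  return __arm_vmulq_m(inactive, v, s, p); /* inactive lanes copied from `inactive` */
+}
+uint16x8_t scale_dont_care(uint16x8_t v, uint16_t s, mve_pred16_t p) {
+  return __arm_vmulq_x(v, s, p);           /* inactive lanes unspecified */
+}
+#endif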
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t __arm_vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t __arm_vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t __arm_vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t __arm_vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t __arm_vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t __arm_vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t __arm_vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t __arm_vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t __arm_vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t __arm_vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))
+int16x8_t __arm_vmvnq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))
+int32x4_t __arm_vmvnq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))
+uint16x8_t __arm_vmvnq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))
+uint32x4_t __arm_vmvnq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t __arm_vmvnq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t __arm_vmvnq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t __arm_vmvnq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t __arm_vmvnq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t __arm_vmvnq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t __arm_vmvnq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t __arm_vmvnq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t __arm_vmvnq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t __arm_vmvnq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t __arm_vmvnq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t __arm_vmvnq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t __arm_vmvnq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
+int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
+int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
+uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
+uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t __arm_vmvnq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t __arm_vmvnq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t __arm_vmvnq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t __arm_vmvnq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t __arm_vmvnq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t __arm_vmvnq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t __arm_vmvnq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t __arm_vmvnq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t __arm_vmvnq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t __arm_vmvnq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t __arm_vmvnq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t __arm_vmvnq_x(uint8x16_t, mve_pred16_t);
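+/* Editor's sketch, not part of the generated header: vmvnq is element-wise
+ * bitwise NOT; the _n forms splat the complement of a constant across all
+ * lanes. Assumes an MVE-enabled target:
+ */
+#if 0
+uint16x8_t not_u16(uint16x8_t v) {
+  return __arm_vmvnq(v); /* ~v per lane */
+}
+#endif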
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t __arm_vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t __arm_vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t __arm_vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t __arm_vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t __arm_vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t __arm_vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t __arm_vnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t __arm_vnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t __arm_vnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t __arm_vnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t __arm_vnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t __arm_vnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t __arm_vnegq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t __arm_vnegq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t __arm_vnegq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t __arm_vnegq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t __arm_vnegq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t __arm_vnegq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t __arm_vornq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t __arm_vornq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t __arm_vornq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t __arm_vorrq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t __arm_vorrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t __arm_vorrq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t __arm_vorrq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t __arm_vorrq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t __arm_vorrq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t __arm_vorrq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t __arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t __arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))
+mve_pred16_t __arm_vpnot(mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t __arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t __arm_vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t __arm_vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t __arm_vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t __arm_vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t __arm_vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t __arm_vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t __arm_vqabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t __arm_vqabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t __arm_vqabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t __arm_vqabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t __arm_vqabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t __arm_vqabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t __arm_vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t __arm_vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t __arm_vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t __arm_vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t __arm_vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t __arm_vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t __arm_vqaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t __arm_vqaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t __arm_vqaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t __arm_vqaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t __arm_vqaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t __arm_vqaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t __arm_vqaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t __arm_vqaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t __arm_vqaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t __arm_vqaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t __arm_vqaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t __arm_vqaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t __arm_vqaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t __arm_vqaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t __arm_vqaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t __arm_vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t __arm_vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t __arm_vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t __arm_vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t __arm_vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t __arm_vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t __arm_vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t __arm_vqdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t __arm_vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t __arm_vqdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t __arm_vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t __arm_vqdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t __arm_vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t __arm_vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t __arm_vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t __arm_vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t __arm_vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t __arm_vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t __arm_vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t __arm_vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t __arm_vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t __arm_vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t __arm_vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t __arm_vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t __arm_vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t __arm_vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t __arm_vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t __arm_vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t __arm_vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t __arm_vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t __arm_vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t __arm_vqdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t __arm_vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t __arm_vqdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t __arm_vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t __arm_vqdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t __arm_vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t __arm_vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t __arm_vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t __arm_vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t __arm_vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t __arm_vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t __arm_vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t __arm_vqdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t __arm_vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t __arm_vqdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t __arm_vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t __arm_vqdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t __arm_vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t __arm_vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t __arm_vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t __arm_vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t __arm_vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t __arm_vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t __arm_vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t __arm_vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t __arm_vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t __arm_vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t __arm_vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t __arm_vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t __arm_vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t __arm_vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t __arm_vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t __arm_vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t __arm_vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t __arm_vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t __arm_vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t __arm_vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t __arm_vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t __arm_vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t __arm_vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t __arm_vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t __arm_vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t __arm_vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t __arm_vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t __arm_vqdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t __arm_vqdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t __arm_vqdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t __arm_vqdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t __arm_vqdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t __arm_vqdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t __arm_vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t __arm_vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t __arm_vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t __arm_vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t __arm_vqdmullbq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t __arm_vqdmullbq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t __arm_vqdmullbq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t __arm_vqdmullbq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t __arm_vqdmullbq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t __arm_vqdmullbq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t __arm_vqdmullbq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t __arm_vqdmullbq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t __arm_vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t __arm_vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t __arm_vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t __arm_vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t __arm_vqdmulltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t __arm_vqdmulltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t __arm_vqdmulltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t __arm_vqdmulltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t __arm_vqdmulltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t __arm_vqdmulltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t __arm_vqdmulltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t __arm_vqdmulltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t __arm_vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t __arm_vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t __arm_vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t __arm_vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t __arm_vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t __arm_vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t __arm_vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t __arm_vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t __arm_vqmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t __arm_vqmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t __arm_vqmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t __arm_vqmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t __arm_vqmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t __arm_vqmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t __arm_vqmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t __arm_vqmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t __arm_vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t __arm_vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t __arm_vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t __arm_vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t __arm_vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t __arm_vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t __arm_vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t __arm_vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t __arm_vqmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t __arm_vqmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t __arm_vqmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t __arm_vqmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t __arm_vqmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t __arm_vqmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t __arm_vqmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t __arm_vqmovntq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t __arm_vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t __arm_vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t __arm_vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t __arm_vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t __arm_vqmovunbq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t __arm_vqmovunbq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t __arm_vqmovunbq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t __arm_vqmovunbq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t __arm_vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t __arm_vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t __arm_vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t __arm_vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t __arm_vqmovuntq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t __arm_vqmovuntq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t __arm_vqmovuntq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t __arm_vqmovuntq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t __arm_vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t __arm_vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t __arm_vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t __arm_vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t __arm_vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t __arm_vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t __arm_vqnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t __arm_vqnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t __arm_vqnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t __arm_vqnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t __arm_vqnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t __arm_vqnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t __arm_vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t __arm_vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t __arm_vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t __arm_vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t __arm_vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t __arm_vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t __arm_vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t __arm_vqrdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t __arm_vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t __arm_vqrdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t __arm_vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t __arm_vqrdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t __arm_vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t __arm_vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t __arm_vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t __arm_vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t __arm_vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t __arm_vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t __arm_vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t __arm_vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t __arm_vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t __arm_vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t __arm_vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t __arm_vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t __arm_vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t __arm_vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t __arm_vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t __arm_vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t __arm_vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t __arm_vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t __arm_vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t __arm_vqrdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t __arm_vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t __arm_vqrdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t __arm_vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t __arm_vqrdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t __arm_vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t __arm_vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t __arm_vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t __arm_vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t __arm_vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t __arm_vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t __arm_vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t __arm_vqrdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t __arm_vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t __arm_vqrdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t __arm_vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t __arm_vqrdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t __arm_vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t __arm_vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t __arm_vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t __arm_vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t __arm_vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t __arm_vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t __arm_vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t __arm_vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t __arm_vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t __arm_vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t __arm_vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t __arm_vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t __arm_vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t __arm_vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t __arm_vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t __arm_vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t __arm_vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t __arm_vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t __arm_vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t __arm_vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t __arm_vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t __arm_vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t __arm_vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t __arm_vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t __arm_vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t __arm_vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t __arm_vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t __arm_vqrdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t __arm_vqrdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t __arm_vqrdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t __arm_vqrdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t __arm_vqrdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t __arm_vqrdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t __arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t __arm_vqrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t __arm_vqrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t __arm_vqrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t __arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t __arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t __arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t __arm_vqshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t __arm_vqshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t __arm_vqshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t __arm_vqshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t __arm_vqshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t __arm_vqshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t __arm_vqshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t __arm_vqshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t __arm_vqshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t __arm_vqshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t __arm_vqshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t __arm_vqshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t __arm_vqshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t __arm_vqshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t __arm_vqshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t __arm_vqshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t __arm_vqshluq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t __arm_vqshluq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t __arm_vqshluq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t __arm_vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t __arm_vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t __arm_vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t __arm_vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t __arm_vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t __arm_vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t __arm_vqsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t __arm_vqsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t __arm_vqsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t __arm_vqsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t __arm_vqsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t __arm_vqsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t __arm_vqsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t __arm_vqsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t __arm_vqsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t __arm_vqsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t __arm_vqsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t __arm_vqsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t __arm_vqsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t __arm_vqsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t __arm_vqsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t __arm_vqsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t __arm_vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t __arm_vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t __arm_vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t __arm_vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t __arm_vrev16q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t __arm_vrev16q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t __arm_vrev16q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t __arm_vrev16q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t __arm_vrev16q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t __arm_vrev16q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t __arm_vrev16q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t __arm_vrev16q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t __arm_vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t __arm_vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t __arm_vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t __arm_vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t __arm_vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t __arm_vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t __arm_vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t __arm_vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t __arm_vrev32q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t __arm_vrev32q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t __arm_vrev32q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t __arm_vrev32q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t __arm_vrev32q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t __arm_vrev32q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t __arm_vrev32q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t __arm_vrev32q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t __arm_vrev32q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t __arm_vrev32q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t __arm_vrev32q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t __arm_vrev32q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t __arm_vrev32q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t __arm_vrev32q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t __arm_vrev32q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t __arm_vrev32q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t __arm_vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t __arm_vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t __arm_vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t __arm_vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t __arm_vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t __arm_vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t __arm_vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t __arm_vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t __arm_vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t __arm_vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t __arm_vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t __arm_vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t __arm_vrev64q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t __arm_vrev64q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t __arm_vrev64q_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t __arm_vrev64q(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t __arm_vrev64q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t __arm_vrev64q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t __arm_vrev64q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t __arm_vrev64q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t __arm_vrev64q_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t __arm_vrev64q(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t __arm_vrev64q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t __arm_vrev64q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t __arm_vrev64q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t __arm_vrev64q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t __arm_vrev64q_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t __arm_vrev64q_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t __arm_vrev64q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t __arm_vrev64q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t __arm_vrev64q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t __arm_vrev64q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t __arm_vrev64q_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t __arm_vrev64q_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t __arm_vrev64q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t __arm_vrev64q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t __arm_vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t __arm_vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t __arm_vrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t __arm_vrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t __arm_vrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t __arm_vrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t __arm_vrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t __arm_vrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t __arm_vrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t __arm_vrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t __arm_vrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t __arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t __arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t __arm_vrshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t __arm_vrshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t __arm_vrshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t __arm_vrshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t __arm_vrshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t __arm_vrshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t __arm_vrshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t __arm_vrshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t __arm_vrshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t __arm_vrshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t __arm_vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t __arm_vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t __arm_vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t __arm_vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t __arm_vsbciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t __arm_vsbciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t __arm_vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t __arm_vsbciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t __arm_vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t __arm_vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t __arm_vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t __arm_vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t __arm_vsbcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t __arm_vsbcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t __arm_vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t __arm_vsbcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t __arm_vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t __arm_vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t __arm_vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t __arm_vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t __arm_vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t __arm_vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t __arm_vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t __arm_vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t __arm_vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t __arm_vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t __arm_vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t __arm_vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t __arm_vshlcq_s16(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t __arm_vshlcq(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t __arm_vshlcq_s32(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t __arm_vshlcq(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t __arm_vshlcq_s8(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t __arm_vshlcq(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t __arm_vshlcq_u16(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t __arm_vshlcq(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t __arm_vshlcq_u32(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t __arm_vshlcq(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t __arm_vshlcq_u8(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t __arm_vshlcq(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t __arm_vshllbq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t __arm_vshllbq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t __arm_vshllbq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t __arm_vshllbq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t __arm_vshllbq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t __arm_vshllbq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t __arm_vshllbq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t __arm_vshlltq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t __arm_vshlltq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t __arm_vshlltq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t __arm_vshlltq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t __arm_vshlltq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t __arm_vshlltq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t __arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t __arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t __arm_vshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t __arm_vshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t __arm_vshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t __arm_vshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t __arm_vshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t __arm_vshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t __arm_vshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t __arm_vshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t __arm_vshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t __arm_vshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t __arm_vshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t __arm_vshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t __arm_vshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t __arm_vshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t __arm_vshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t __arm_vshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t __arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t __arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t __arm_vshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t __arm_vshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t __arm_vshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t __arm_vshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t __arm_vshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t __arm_vshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t __arm_vshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t __arm_vshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t __arm_vshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t __arm_vsliq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t __arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t __arm_vsriq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t __arm_vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t __arm_vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t __arm_vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t __arm_vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t __arm_vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t __arm_vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t __arm_vsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t __arm_vsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t __arm_vsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t __arm_vsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t __arm_vsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t __arm_vsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t __arm_vsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t __arm_vsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t __arm_vsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t __arm_vsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t __arm_vsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t __arm_vsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t __arm_vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t __arm_vsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t __arm_vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t __arm_vsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t __arm_vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t __arm_vsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t __arm_vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t __arm_vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t __arm_vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t __arm_vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t __arm_vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t __arm_vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t __arm_vuninitializedq(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t __arm_vuninitializedq(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t __arm_vuninitializedq(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t __arm_vuninitializedq(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t __arm_vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t __arm_vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t __arm_vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t __arm_vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t __arm_vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t __arm_vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t __arm_vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t __arm_vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t __arm_vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t __arm_vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t __arm_vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t __arm_vuninitializedq_u8();
+
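+// Usage sketch (illustrative comment, not part of the generated header):
+// each polymorphic name above, e.g. __arm_vsubq, dispatches on its
+// argument types to the matching type-suffixed builtin. The _m
+// ("merging") forms take an extra `inactive` vector supplying the lanes
+// whose predicate bits are clear, the _x ("dont-care") forms leave those
+// lanes unspecified, and the _n forms take a scalar second operand.
+//
+//   uint32x4_t sub_masked(uint32x4_t inactive, uint32x4_t a,
+//                         uint32x4_t b, mve_pred16_t p) {
+//     // Enabled lanes compute a - b; the rest copy `inactive`.
+//     return __arm_vsubq_m(inactive, a, b, p);
+//   }
+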
+#if (__ARM_FEATURE_MVE & 2)
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
+typedef struct { float16x8_t val[2]; } float16x8x2_t;
+typedef struct { float16x8_t val[4]; } float16x8x4_t;
+typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;
+typedef struct { float32x4_t val[2]; } float32x4x2_t;
+typedef struct { float32x4_t val[4]; } float32x4x4_t;
+
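+// Illustrative note: everything in this block is guarded by bit 1 of
+// __ARM_FEATURE_MVE, i.e. it requires the MVE floating-point extension
+// (e.g. a Cortex-M55 configured with MVE-FP). A minimal sketch of the
+// predicated scalar-add _x form declared below, assuming that feature:
+//
+//   float32x4_t add_bias(float32x4_t v, float32_t bias, mve_pred16_t p) {
+//     // _x form: lanes disabled in `p` hold an unspecified value.
+//     return __arm_vaddq_x(v, bias, p);
+//   }
+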
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t __arm_vabdq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t __arm_vabdq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t __arm_vabsq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t __arm_vabsq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t __arm_vabsq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t __arm_vabsq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t __arm_vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t __arm_vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t __arm_vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t __arm_vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t __arm_vabsq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t __arm_vabsq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t __arm_vabsq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t __arm_vabsq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t __arm_vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t __arm_vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t __arm_vaddq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t __arm_vaddq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t __arm_vaddq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t __arm_vaddq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t __arm_vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t __arm_vaddq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t __arm_vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t __arm_vaddq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t __arm_vandq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t __arm_vandq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t __arm_vbicq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t __arm_vbicq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t __arm_vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t __arm_vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t __arm_vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t __arm_vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t __arm_vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t __arm_vbrsrq_n_f16(float16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t __arm_vbrsrq(float16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t __arm_vbrsrq_n_f32(float32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t __arm_vbrsrq(float32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t __arm_vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t __arm_vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t __arm_vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t __arm_vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t __arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t __arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
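+// Illustrative note: the vcmlaq _rot90/_rot180/_rot270 variants multiply
+// by the second operand rotated through the given angle in the complex
+// plane. Under the usual ACLE pattern (an assumption; this header itself
+// does not spell it out), a full complex multiply-accumulate over
+// interleaved (re, im) lane pairs pairs the base form with _rot90:
+//
+//   float32x4_t cmac(float32x4_t acc, float32x4_t a, float32x4_t b) {
+//     // Together the two rotations accumulate the complex product a*b.
+//     acc = __arm_vcmlaq(acc, a, b);
+//     return __arm_vcmlaq_rot90(acc, a, b);
+//   }
+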
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t __arm_vcmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t __arm_vcmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t __arm_vcmulq_rot180_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t __arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t __arm_vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t __arm_vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t __arm_vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t __arm_vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t __arm_vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t __arm_vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t __arm_vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t __arm_vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))
+int16x8_t __arm_vcvtaq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))
+int32x4_t __arm_vcvtaq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))
+uint16x8_t __arm_vcvtaq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))
+uint32x4_t __arm_vcvtaq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))
+int16x8_t __arm_vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))
+int32x4_t __arm_vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))
+uint16x8_t __arm_vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))
+uint32x4_t __arm_vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))
+float32x4_t __arm_vcvtbq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))
+float32x4_t __arm_vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))
+float32x4_t __arm_vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t __arm_vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t __arm_vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t __arm_vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t __arm_vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t __arm_vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t __arm_vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t __arm_vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t __arm_vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))
+int16x8_t __arm_vcvtmq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))
+int32x4_t __arm_vcvtmq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))
+uint16x8_t __arm_vcvtmq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))
+uint32x4_t __arm_vcvtmq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))
+int16x8_t __arm_vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))
+int32x4_t __arm_vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))
+uint16x8_t __arm_vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))
+uint32x4_t __arm_vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t __arm_vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t __arm_vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t __arm_vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t __arm_vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t __arm_vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t __arm_vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t __arm_vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t __arm_vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))
+int16x8_t __arm_vcvtnq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))
+int32x4_t __arm_vcvtnq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))
+uint16x8_t __arm_vcvtnq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))
+uint32x4_t __arm_vcvtnq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))
+int16x8_t __arm_vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))
+int32x4_t __arm_vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))
+uint16x8_t __arm_vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))
+uint32x4_t __arm_vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t __arm_vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t __arm_vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t __arm_vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t __arm_vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t __arm_vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t __arm_vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t __arm_vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t __arm_vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))
+int16x8_t __arm_vcvtpq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))
+int32x4_t __arm_vcvtpq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))
+uint16x8_t __arm_vcvtpq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))
+uint32x4_t __arm_vcvtpq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))
+int16x8_t __arm_vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))
+int32x4_t __arm_vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))
+uint16x8_t __arm_vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))
+uint32x4_t __arm_vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t __arm_vcvtq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t __arm_vcvtq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t __arm_vcvtq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t __arm_vcvtq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t __arm_vcvtq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t __arm_vcvtq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t __arm_vcvtq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t __arm_vcvtq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t __arm_vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t __arm_vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t __arm_vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t __arm_vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t __arm_vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t __arm_vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t __arm_vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t __arm_vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t __arm_vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t __arm_vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t __arm_vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t __arm_vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t __arm_vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t __arm_vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t __arm_vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t __arm_vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t __arm_vcvtq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t __arm_vcvtq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t __arm_vcvtq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t __arm_vcvtq_n_f32_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t __arm_vcvtq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
+int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
+int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
+uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
+uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))
+int16x8_t __arm_vcvtq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))
+int32x4_t __arm_vcvtq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))
+uint16x8_t __arm_vcvtq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))
+uint32x4_t __arm_vcvtq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t __arm_vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t __arm_vcvtq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t __arm_vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t __arm_vcvtq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t __arm_vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t __arm_vcvtq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t __arm_vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t __arm_vcvtq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t __arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
+int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
+int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
+uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
+uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))
+int16x8_t __arm_vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))
+int32x4_t __arm_vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))
+uint16x8_t __arm_vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))
+uint32x4_t __arm_vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))
+float32x4_t __arm_vcvttq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))
+float32x4_t __arm_vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))
+float32x4_t __arm_vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))
+float16x8_t __arm_vdupq_n_f16(float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))
+float32x4_t __arm_vdupq_n_f32(float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))
+float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))
+float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t __arm_veorq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t __arm_veorq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t __arm_vfmaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t __arm_vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t __arm_vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t __arm_vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t __arm_vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t __arm_vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t __arm_vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t __arm_vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t __arm_vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t __arm_vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t __arm_vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t __arm_vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t __arm_vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t __arm_vfmasq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t __arm_vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t __arm_vfmasq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t __arm_vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t __arm_vfmsq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t __arm_vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t __arm_vfmsq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t __arm_vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t __arm_vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t __arm_vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t __arm_vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t __arm_vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t __arm_vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t __arm_vmaxnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t __arm_vmaxnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t __arm_vmaxnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t __arm_vmaxnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t __arm_vmaxnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t __arm_vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t __arm_vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t __arm_vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t __arm_vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t __arm_vmaxnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t __arm_vmaxnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t __arm_vmaxnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t __arm_vmaxnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t __arm_vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t __arm_vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t __arm_vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t __arm_vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t __arm_vminnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t __arm_vminnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t __arm_vminnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t __arm_vminnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t __arm_vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t __arm_vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t __arm_vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t __arm_vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t __arm_vminnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t __arm_vminnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t __arm_vminnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t __arm_vminnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t __arm_vminnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t __arm_vminnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t __arm_vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t __arm_vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t __arm_vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t __arm_vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t __arm_vmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t __arm_vmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t __arm_vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t __arm_vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t __arm_vmulq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t __arm_vmulq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t __arm_vmulq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t __arm_vmulq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t __arm_vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t __arm_vmulq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t __arm_vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t __arm_vmulq_x(float32x4_t, float32_t, mve_pred16_t);
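
The vmulq block above illustrates the suffix scheme used throughout this header: _n takes a scalar second operand, _m is merging predication with an explicit "inactive" vector supplying the masked-off lanes, _x is don't-care predication (inactive lanes undefined), and the unsuffixed overloads dispatch polymorphically on argument types. A short sketch of the merging form; the function name is illustrative:

#include <arm_mve.h>

/* Lanes where p is clear are taken from the inactive argument (here, the
 * input itself), so only the predicated lanes are scaled. */
float32x4_t scale_some_lanes(float32x4_t v, float32_t s, mve_pred16_t p) {
  return __arm_vmulq_m(v, v, s, p);   /* resolves to __arm_vmulq_m_n_f32 */
}
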
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t __arm_vnegq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t __arm_vnegq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t __arm_vnegq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t __arm_vnegq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t __arm_vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t __arm_vnegq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t __arm_vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t __arm_vnegq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t __arm_vnegq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t __arm_vnegq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t __arm_vnegq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t __arm_vnegq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t __arm_vornq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t __arm_vornq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t __arm_vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t __arm_vorrq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t __arm_vorrq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t);
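
__arm_vpselq is a per-lane select: the result takes the first operand wherever the predicate bits are set and the second operand elsewhere. A minimal sketch with an illustrative name:

#include <arm_mve.h>

/* Blend: a where the predicate is true, b where it is false. */
float32x4_t blend_f32(float32x4_t a, float32x4_t b, mve_pred16_t p) {
  return __arm_vpselq(a, b, p);
}
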
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
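
The __arm_vreinterpretq_* block above is a pure bit-pattern cast between vector types: no lane values are converted, only the type changes. One common use is IEEE-754 bit manipulation with integer ops. A hedged sketch, assuming __arm_veorq and __arm_vdupq_n_u32, which are declared elsewhere in this header:

#include <arm_mve.h>

/* Negate four floats by XOR-ing the sign bit, entirely in integer ops. */
float32x4_t negate_f32(float32x4_t v) {
  uint32x4_t bits = __arm_veorq(__arm_vreinterpretq_u32(v),
                                __arm_vdupq_n_u32(0x80000000u));
  return __arm_vreinterpretq_f32(bits);
}
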
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t __arm_vrev32q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t __arm_vrev32q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t __arm_vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t __arm_vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t __arm_vrev32q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t __arm_vrev32q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t __arm_vrev64q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t __arm_vrev64q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t __arm_vrev64q_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t __arm_vrev64q(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t __arm_vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t __arm_vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t __arm_vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t __arm_vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t __arm_vrev64q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t __arm_vrev64q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t __arm_vrev64q_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t __arm_vrev64q_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t __arm_vrndaq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t __arm_vrndaq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t __arm_vrndaq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t __arm_vrndaq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t __arm_vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t __arm_vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t __arm_vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t __arm_vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t __arm_vrndaq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t __arm_vrndaq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t __arm_vrndaq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t __arm_vrndaq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t __arm_vrndmq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t __arm_vrndmq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t __arm_vrndmq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t __arm_vrndmq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t __arm_vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t __arm_vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t __arm_vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t __arm_vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t __arm_vrndmq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t __arm_vrndmq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t __arm_vrndmq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t __arm_vrndmq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t __arm_vrndnq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t __arm_vrndnq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t __arm_vrndnq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t __arm_vrndnq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t __arm_vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t __arm_vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t __arm_vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t __arm_vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t __arm_vrndnq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t __arm_vrndnq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t __arm_vrndnq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t __arm_vrndnq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t __arm_vrndpq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t __arm_vrndpq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t __arm_vrndpq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t __arm_vrndpq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t __arm_vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t __arm_vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t __arm_vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t __arm_vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t __arm_vrndpq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t __arm_vrndpq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t __arm_vrndpq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t __arm_vrndpq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t __arm_vrndq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t __arm_vrndq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t __arm_vrndq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t __arm_vrndq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t __arm_vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t __arm_vrndq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t __arm_vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t __arm_vrndq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t __arm_vrndq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t __arm_vrndq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t __arm_vrndq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t __arm_vrndq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t __arm_vrndxq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t __arm_vrndxq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t __arm_vrndxq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t __arm_vrndxq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t __arm_vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t __arm_vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t __arm_vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t __arm_vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t __arm_vrndxq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t __arm_vrndxq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t __arm_vrndxq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t __arm_vrndxq_x(float32x4_t, mve_pred16_t);
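
The vrnd* group above maps to the usual IEEE rounding variants: vrndaq (to nearest, ties away from zero), vrndmq (toward minus infinity), vrndnq (to nearest, ties to even), vrndpq (toward plus infinity), vrndq (toward zero), and vrndxq (current rounding mode, signalling inexact). A one-line sketch of a vector floor:

#include <arm_mve.h>

/* floor() across all four lanes. */
float32x4_t floor_f32(float32x4_t v) { return __arm_vrndmq(v); }
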
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);
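
__arm_vsetq_lane inserts a scalar into one lane and returns the updated vector; although the prototype says int, the lane index must be a compile-time constant in range for the lane count. Sketch:

#include <arm_mve.h>

float32x4_t replace_lane0(float32x4_t v, float32_t s) {
  return __arm_vsetq_lane(s, v, 0);   /* lanes 1..3 pass through */
}
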
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q(float32_t *, float32x4x4_t);
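
__arm_vst2q/__arm_vst4q store a 2- or 4-register aggregate with lane interleaving, the natural way to write structure-of-arrays registers back out as array-of-structures memory. A hedged sketch writing separate real/imaginary vectors as eight interleaved floats; the names are illustrative:

#include <arm_mve.h>

void store_complex4(float32_t *dst, float32x4_t re, float32x4_t im) {
  float32x4x2_t pair;
  pair.val[0] = re;   /* lands in dst[0], dst[2], dst[4], dst[6] */
  pair.val[1] = im;   /* lands in dst[1], dst[3], dst[5], dst[7] */
  __arm_vst2q(dst, pair);
}
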
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
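
The scatter stores above come in two addressing flavours: _scatter_offset takes byte offsets, while _scatter_shifted_offset takes element indices that the hardware scales by the element size (by 4 for 32-bit lanes). A minimal sketch with illustrative names:

#include <arm_mve.h>

/* Write v's four lanes to table[idx[0]], table[idx[1]], ... */
void scatter4_f32(float32_t *table, uint32x4_t idx, float32x4_t v) {
  __arm_vstrwq_scatter_shifted_offset(table, idx, v);
}
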
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t __arm_vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t __arm_vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t __arm_vsubq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t __arm_vsubq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t __arm_vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t __arm_vsubq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t __arm_vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t __arm_vsubq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t __arm_vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t __arm_vuninitializedq_f32();
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t __arm_vuninitializedq(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t __arm_vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) */
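
Taken together, the predicated loads/stores and _x arithmetic above support MVE's tail-predication idiom: __arm_vctp32q and __arm_vld1q_z (declared elsewhere in this header) build a first-n-lanes predicate and a zeroing predicated load, so the final partial chunk of a loop needs no scalar epilogue. A sketch, assuming an MVE-FP target; the function name is illustrative:

#include <arm_mve.h>

/* In-place scale of n floats, 4 lanes per iteration, predicated tail. */
void scale_f32(float32_t *p, float32_t s, uint32_t n) {
  for (uint32_t i = 0; i < n; i += 4) {
    mve_pred16_t pr = __arm_vctp32q(n - i);      /* active-lane mask */
    float32x4_t v = __arm_vld1q_z(&p[i], pr);    /* zeroing load     */
    __arm_vst1q_p(&p[i], __arm_vmulq_x(v, s, pr), pr);
  }
}
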
+
+#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl)))
+int64_t asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll)))
+uint64_t lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr)))
+int32_t sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl)))
+int64_t sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl)))
+int32_t sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll)))
+int64_t sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr)))
+int32_t srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl)))
+int64_t srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl)))
+uint32_t uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll)))
+uint64_t uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl)))
+uint32_t uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll)))
+uint64_t uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr)))
+uint32_t urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl)))
+uint64_t urshrl(uint64_t, int);
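
The block above declares Armv8.1-M's scalar shift intrinsics: saturating (sq*/uq*), rounding (sr*/ur*), and 64-bit "long" (*l) shifts over ordinary integer registers. The int shift operands of the immediate forms must be compile-time constants. A small sketch; function names are illustrative:

#include <arm_mve.h>

int32_t  sat_double(int32_t x)   { return sqshl(x, 1); }  /* saturating << 1 */
uint32_t div16_round(uint32_t x) { return urshr(x, 4); }  /* rounding >> 4   */
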
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16)))
+uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32)))
+uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8)))
+uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16)))
+uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32)))
+uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8)))
+uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16)))
+uint32_t vabavq(uint32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t vabavq_s32(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32)))
+uint32_t vabavq(uint32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8)))
+uint32_t vabavq(uint32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16)))
+uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32)))
+uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8)))
+uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t);
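
vabavq accumulates the sum of absolute lane differences of its two vector operands into the scalar first argument, i.e. a single-instruction SAD step. A hedged sketch over full 16-byte chunks (tail handling omitted; vld1q is declared elsewhere in this header):

#include <arm_mve.h>

uint32_t sad_u8(const uint8_t *a, const uint8_t *b, uint32_t len) {
  uint32_t acc = 0;
  for (uint32_t i = 0; i + 16 <= len; i += 16)
    acc = vabavq(acc, vld1q(&a[i]), vld1q(&b[i]));
  return acc;
}
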
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16)))
+int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32)))
+int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8)))
+int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16)))
+uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32)))
+uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8)))
+uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t vabdq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16)))
+int16x8_t vabdq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t vabdq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32)))
+int32x4_t vabdq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t vabdq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8)))
+int8x16_t vabdq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16)))
+uint16x8_t vabdq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32)))
+uint32x4_t vabdq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8)))
+uint8x16_t vabdq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16)))
+int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32)))
+int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8)))
+int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16)))
+uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32)))
+uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8)))
+uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16)))
+int16x8_t vabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32)))
+int32x4_t vabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8)))
+int8x16_t vabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t vabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16)))
+int16x8_t vabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t vabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32)))
+int32x4_t vabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t vabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8)))
+int8x16_t vabsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t vabsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16)))
+int16x8_t vabsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t vabsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32)))
+int32x4_t vabsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t vabsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8)))
+int8x16_t vabsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32)))
+int64_t vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32)))
+uint64_t vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t vaddlvaq_s32(int64_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32)))
+int64_t vaddlvaq(int64_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t vaddlvaq_u32(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32)))
+uint64_t vaddlvaq(uint64_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t vaddlvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32)))
+int64_t vaddlvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t vaddlvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32)))
+uint64_t vaddlvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t vaddlvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32)))
+int64_t vaddlvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t vaddlvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32)))
+uint64_t vaddlvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16)))
+int16x8_t vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32)))
+int32x4_t vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8)))
+int8x16_t vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16)))
+uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32)))
+uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8)))
+uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t vaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16)))
+int16x8_t vaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t vaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32)))
+int32x4_t vaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t vaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8)))
+int8x16_t vaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t vaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16)))
+uint16x8_t vaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t vaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32)))
+uint32x4_t vaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t vaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8)))
+uint8x16_t vaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16)))
+int16x8_t vaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32)))
+int32x4_t vaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8)))
+int8x16_t vaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16)))
+uint16x8_t vaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32)))
+uint32x4_t vaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8)))
+uint8x16_t vaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16)))
+int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32)))
+int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8)))
+int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16)))
+uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32)))
+uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8)))
+uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16)))
+int32_t vaddvaq_p(int32_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32)))
+int32_t vaddvaq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8)))
+int32_t vaddvaq_p(int32_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16)))
+uint32_t vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32)))
+uint32_t vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8)))
+uint32_t vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t vaddvaq_s16(int32_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16)))
+int32_t vaddvaq(int32_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t vaddvaq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32)))
+int32_t vaddvaq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t vaddvaq_s8(int32_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8)))
+int32_t vaddvaq(int32_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t vaddvaq_u16(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16)))
+uint32_t vaddvaq(uint32_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t vaddvaq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32)))
+uint32_t vaddvaq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t vaddvaq_u8(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8)))
+uint32_t vaddvaq(uint32_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t vaddvq_p_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16)))
+int32_t vaddvq_p(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t vaddvq_p_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32)))
+int32_t vaddvq_p(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t vaddvq_p_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8)))
+int32_t vaddvq_p(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t vaddvq_p_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16)))
+uint32_t vaddvq_p(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t vaddvq_p_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32)))
+uint32_t vaddvq_p(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t vaddvq_p_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8)))
+uint32_t vaddvq_p(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t vaddvq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16)))
+int32_t vaddvq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t vaddvq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32)))
+int32_t vaddvq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t vaddvq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8)))
+int32_t vaddvq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t vaddvq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16)))
+uint32_t vaddvq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t vaddvq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32)))
+uint32_t vaddvq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t vaddvq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8)))
+uint32_t vaddvq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16)))
+int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32)))
+int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8)))
+int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16)))
+uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32)))
+uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8)))
+uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t vandq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16)))
+int16x8_t vandq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t vandq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32)))
+int32x4_t vandq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t vandq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8)))
+int8x16_t vandq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t vandq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16)))
+uint16x8_t vandq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t vandq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32)))
+uint32x4_t vandq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t vandq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8)))
+uint8x16_t vandq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16)))
+int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32)))
+int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8)))
+int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16)))
+uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32)))
+uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8)))
+uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16)))
+int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32)))
+int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16)))
+uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32)))
+uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16)))
+int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32)))
+int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8)))
+int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16)))
+uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32)))
+uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8)))
+uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t vbicq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16)))
+int16x8_t vbicq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t vbicq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32)))
+int32x4_t vbicq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16)))
+uint16x8_t vbicq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32)))
+uint32x4_t vbicq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t vbicq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16)))
+int16x8_t vbicq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t vbicq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32)))
+int32x4_t vbicq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t vbicq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8)))
+int8x16_t vbicq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16)))
+uint16x8_t vbicq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32)))
+uint32x4_t vbicq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8)))
+uint8x16_t vbicq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16)))
+int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32)))
+int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8)))
+int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16)))
+uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32)))
+uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8)))
+uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16)))
+int16x8_t vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32)))
+int32x4_t vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8)))
+int8x16_t vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16)))
+uint16x8_t vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32)))
+uint32x4_t vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8)))
+uint8x16_t vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t vbrsrq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16)))
+int16x8_t vbrsrq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t vbrsrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32)))
+int32x4_t vbrsrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t vbrsrq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8)))
+int8x16_t vbrsrq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t vbrsrq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16)))
+uint16x8_t vbrsrq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t vbrsrq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32)))
+uint32x4_t vbrsrq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t vbrsrq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8)))
+uint8x16_t vbrsrq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16)))
+int16x8_t vbrsrq_x(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32)))
+int32x4_t vbrsrq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8)))
+int8x16_t vbrsrq_x(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16)))
+uint16x8_t vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32)))
+uint32x4_t vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8)))
+uint8x16_t vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16)))
+int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32)))
+int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8)))
+int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16)))
+uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32)))
+uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8)))
+uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16)))
+int16x8_t vcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t vcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32)))
+int32x4_t vcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8)))
+int8x16_t vcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16)))
+uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32)))
+uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8)))
+uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16)))
+int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32)))
+int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8)))
+int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16)))
+uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32)))
+uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8)))
+uint8x16_t vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16)))
+int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32)))
+int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8)))
+int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16)))
+uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32)))
+uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8)))
+uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16)))
+int16x8_t vcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32)))
+int32x4_t vcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8)))
+int8x16_t vcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16)))
+uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32)))
+uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8)))
+uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16)))
+int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32)))
+int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8)))
+int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16)))
+uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32)))
+uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8)))
+uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16)))
+int16x8_t vclsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32)))
+int32x4_t vclsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8)))
+int8x16_t vclsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t vclsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16)))
+int16x8_t vclsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t vclsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32)))
+int32x4_t vclsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t vclsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8)))
+int8x16_t vclsq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t vclsq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16)))
+int16x8_t vclsq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t vclsq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32)))
+int32x4_t vclsq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t vclsq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8)))
+int8x16_t vclsq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16)))
+int16x8_t vclzq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32)))
+int32x4_t vclzq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8)))
+int8x16_t vclzq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16)))
+uint16x8_t vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32)))
+uint32x4_t vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8)))
+uint8x16_t vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t vclzq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16)))
+int16x8_t vclzq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t vclzq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32)))
+int32x4_t vclzq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t vclzq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8)))
+int8x16_t vclzq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t vclzq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16)))
+uint16x8_t vclzq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t vclzq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32)))
+uint32x4_t vclzq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t vclzq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8)))
+uint8x16_t vclzq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t vclzq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16)))
+int16x8_t vclzq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t vclzq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32)))
+int32x4_t vclzq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t vclzq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8)))
+int8x16_t vclzq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t vclzq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16)))
+uint16x8_t vclzq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t vclzq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32)))
+uint32x4_t vclzq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t vclzq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8)))
+uint8x16_t vclzq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q)))
+mve_pred16_t vctp16q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m)))
+mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q)))
+mve_pred16_t vctp32q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m)))
+mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q)))
+mve_pred16_t vctp64q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m)))
+mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q)))
+mve_pred16_t vctp8q(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m)))
+mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16)))
+uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32)))
+uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8)))
+uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16)))
+uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32)))
+uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8)))
+uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t vddupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16)))
+uint16x8_t vddupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t vddupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32)))
+uint32x4_t vddupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t vddupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8)))
+uint8x16_t vddupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t vddupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16)))
+uint16x8_t vddupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t vddupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32)))
+uint32x4_t vddupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t vddupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8)))
+uint8x16_t vddupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16)))
+uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32)))
+uint32x4_t vddupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8)))
+uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16)))
+uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32)))
+uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8)))
+uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t);
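+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vddupq builds a decrementing lane sequence from a scalar start value.
+ * The `int` argument is the step and must be a constant 1, 2, 4 or 8;
+ * `_wb` variants take a pointer to the start value and write the updated
+ * counter back through it. A minimal sketch:
+ *
+ *   uint32x4_t v = vddupq_n_u32(12, 4);   // lanes: 12, 8, 4, 0
+ */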
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16)))
+int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32)))
+int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8)))
+int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16)))
+uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32)))
+uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8)))
+uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16)))
+int16x8_t vdupq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32)))
+int32x4_t vdupq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8)))
+int8x16_t vdupq_n_s8(int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16)))
+uint16x8_t vdupq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32)))
+uint32x4_t vdupq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8)))
+uint8x16_t vdupq_n_u8(uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16)))
+int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32)))
+int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8)))
+int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16)))
+uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32)))
+uint32x4_t vdupq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8)))
+uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t);
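+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vdupq_n broadcasts one scalar into every lane. As elsewhere in this
+ * header, `_m` forms take an inactive-value vector plus a predicate and
+ * merge, while `_x` forms leave inactive lanes undefined. A minimal sketch:
+ *
+ *   int16x8_t v = vdupq_n_s16(3);   // all eight lanes hold 3
+ */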
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16)))
+uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32)))
+uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8)))
+uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16)))
+uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32)))
+uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8)))
+uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16)))
+uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32)))
+uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8)))
+uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16)))
+uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32)))
+uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8)))
+uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16)))
+uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32)))
+uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8)))
+uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16)))
+uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32)))
+uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8)))
+uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
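+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vdwdupq is vddupq with wrap-around: the extra uint32_t scalar is a wrap
+ * limit, and the down-counting sequence wraps within that bound instead of
+ * underflowing. The `_wb` pointer forms write the next start value back. */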
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16)))
+int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32)))
+int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8)))
+int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16)))
+uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32)))
+uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8)))
+uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t veorq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16)))
+int16x8_t veorq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t veorq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32)))
+int32x4_t veorq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t veorq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8)))
+int8x16_t veorq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t veorq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16)))
+uint16x8_t veorq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t veorq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32)))
+uint32x4_t veorq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t veorq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8)))
+uint8x16_t veorq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16)))
+int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t veorq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32)))
+int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8)))
+int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16)))
+uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32)))
+uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8)))
+uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
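+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * veorq is a lane-wise bitwise XOR; the overloaded veorq() resolves to the
+ * typed variant from its argument types. A minimal sketch:
+ *
+ *   uint8x16_t r = veorq(a, b);   // r[i] = a[i] ^ b[i] for each lane
+ */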
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane(uint8x16_t, int);
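+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vgetq_lane extracts a single scalar lane; the lane index must be a
+ * compile-time constant within range for the vector type. A minimal sketch:
+ *
+ *   int32_t x = vgetq_lane(vdupq_n_s32(7), 2);   // x == 7
+ */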
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16)))
+int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32)))
+int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8)))
+int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16)))
+uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32)))
+uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8)))
+uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16)))
+int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32)))
+int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8)))
+int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16)))
+uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32)))
+uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8)))
+uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t vhaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16)))
+int16x8_t vhaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t vhaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32)))
+int32x4_t vhaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t vhaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8)))
+int8x16_t vhaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t vhaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16)))
+uint16x8_t vhaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t vhaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32)))
+uint32x4_t vhaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t vhaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8)))
+uint8x16_t vhaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t vhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16)))
+int16x8_t vhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t vhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32)))
+int32x4_t vhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t vhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8)))
+int8x16_t vhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16)))
+uint16x8_t vhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32)))
+uint32x4_t vhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8)))
+uint8x16_t vhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16)))
+int16x8_t vhaddq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32)))
+int32x4_t vhaddq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8)))
+int8x16_t vhaddq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16)))
+uint16x8_t vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32)))
+uint32x4_t vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8)))
+uint8x16_t vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16)))
+int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32)))
+int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8)))
+int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16)))
+uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32)))
+uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8)))
+uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
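+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vhaddq is a halving add: each lane computes (a + b) >> 1 with the sum
+ * formed in wider arithmetic, so it cannot overflow before the shift.
+ * `_n` forms take the second operand as a scalar broadcast to all lanes. */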
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16)))
+int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32)))
+int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8)))
+int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16)))
+int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t vhcaddq_rot270_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32)))
+int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8)))
+int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16)))
+int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32)))
+int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8)))
+int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16)))
+int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32)))
+int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8)))
+int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16)))
+int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32)))
+int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8)))
+int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16)))
+int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32)))
+int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8)))
+int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t);
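+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vhcaddq_rot90/_rot270 treat adjacent lane pairs as complex values,
+ * rotate the second operand by 90 or 270 degrees (multiplication by +i or
+ * -i) before adding, and halve the result; signed element types only. */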
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16)))
+int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32)))
+int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8)))
+int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16)))
+uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32)))
+uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8)))
+uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16)))
+int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32)))
+int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8)))
+int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16)))
+uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32)))
+uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8)))
+uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t vhsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16)))
+int16x8_t vhsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t vhsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32)))
+int32x4_t vhsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t vhsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8)))
+int8x16_t vhsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t vhsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16)))
+uint16x8_t vhsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t vhsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32)))
+uint32x4_t vhsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t vhsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8)))
+uint8x16_t vhsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t vhsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16)))
+int16x8_t vhsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t vhsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32)))
+int32x4_t vhsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t vhsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8)))
+int8x16_t vhsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16)))
+uint16x8_t vhsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32)))
+uint32x4_t vhsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8)))
+uint8x16_t vhsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16)))
+int16x8_t vhsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32)))
+int32x4_t vhsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8)))
+int8x16_t vhsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16)))
+uint16x8_t vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32)))
+uint32x4_t vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8)))
+uint8x16_t vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16)))
+int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32)))
+int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8)))
+int8x16_t vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16)))
+uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32)))
+uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8)))
+uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
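+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vhsubq mirrors vhaddq for subtraction: each lane computes (a - b) >> 1
+ * without intermediate overflow, with `_n`, `_m` and `_x` forms following
+ * the same scalar-operand and predication conventions as above. */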
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16)))
+uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32)))
+uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8)))
+uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16)))
+uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32)))
+uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8)))
+uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t vidupq_n_u16(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16)))
+uint16x8_t vidupq_u16(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t vidupq_n_u32(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32)))
+uint32x4_t vidupq_u32(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t vidupq_n_u8(uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8)))
+uint8x16_t vidupq_u8(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t vidupq_wb_u16(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16)))
+uint16x8_t vidupq_u16(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t vidupq_wb_u32(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32)))
+uint32x4_t vidupq_u32(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t vidupq_wb_u8(uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8)))
+uint8x16_t vidupq_u8(uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16)))
+uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32)))
+uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8)))
+uint8x16_t vidupq_x_u8(uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16)))
+uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32)))
+uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8)))
+uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t);
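+/* Editor's illustrative note (not emitted by the intrinsics generator):
+ * vidupq is the incrementing counterpart of vddupq, counting up from the
+ * start value by a constant step of 1, 2, 4 or 8. A minimal sketch:
+ *
+ *   uint32x4_t v = vidupq_n_u32(0, 4);   // lanes: 0, 4, 8, 12
+ */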
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16)))
+uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32)))
+uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8)))
+uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16)))
+uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32)))
+uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8)))
+uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16)))
+uint16x8_t viwdupq_u16(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32)))
+uint32x4_t viwdupq_u32(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8)))
+uint8x16_t viwdupq_u8(uint32_t, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16)))
+uint16x8_t viwdupq_u16(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32)))
+uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8)))
+uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16)))
+uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32)))
+uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8)))
+uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16)))
+uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32)))
+uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8)))
+uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t);
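+/* Editorial sketch (not part of the generated header): the vidupq / viwdupq
+ * families above create per-lane index sequences, and their suffixes follow
+ * the usual MVE scheme: _n takes a scalar start value, _wb takes a pointer
+ * and writes the advanced start back, _m merges masked-off lanes from an
+ * extra leading `inactive` argument, and _x leaves them undefined.  Assuming
+ * <arm_mve.h> and an MVE target (-march=armv8.1-m.main+mve):
+ *
+ *   uint32_t start = 0;
+ *   // lanes 0,1,2,3; `start` advances to 4 and wraps back to 0 at 8
+ *   uint32x4_t idx = viwdupq_wb_u32(&start, 8, 1);
+ */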
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
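+/* Editorial sketch (not part of the generated header): vld1q_z is the
+ * zeroing-predicated contiguous load; lanes whose predicate bit is clear
+ * read as zero, which makes loop tails safe without scalar cleanup.  A
+ * minimal tail-predicated sum, assuming the caller supplies `src` and `n`:
+ *
+ *   int32x4_t acc = vdupq_n_s32(0);
+ *   for (uint32_t i = 0; i < n; i += 4) {
+ *     mve_pred16_t p = vctp32q(n - i);       // tail predicate
+ *     acc = vaddq(acc, vld1q_z(&src[i], p)); // masked lanes load as 0
+ *   }
+ *   int32_t total = vaddvq(acc);
+ */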
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q_s16(const int16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q_s32(const int32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q_s8(const int8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q(const uint8_t *);
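+/* Editorial sketch (not part of the generated header): vld2q and vld4q are
+ * de-interleaving structure loads, e.g. splitting packed RGBA bytes into one
+ * vector per channel.  Assuming `rgba` points at 64 packed bytes:
+ *
+ *   uint8x16x4_t px = vld4q_u8(rgba);
+ *   uint8x16_t r = px.val[0], g = px.val[1], b = px.val[2], a = px.val[3];
+ */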
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);
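+/* Editorial sketch (not part of the generated header): the vldrbq loads read
+ * bytes and widen them to the destination lane size (vldrbq_s16 turns 8
+ * bytes into a sign-extended int16x8_t), and the _gather_offset forms index
+ * each lane independently through a vector of byte offsets.  A lookup-table
+ * use, assuming the caller's `table` and `indices` byte arrays:
+ *
+ *   uint8x16_t idx = vld1q_u8(indices);
+ *   uint8x16_t out = vldrbq_gather_offset_u8(table, idx);
+ */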
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
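+/* Editorial sketch (not part of the generated header): the _gather_base
+ * forms above take a vector of absolute addresses plus an immediate byte
+ * offset, the _wb variants write the advanced address vector back, and
+ * _shifted_offset scales each lane's offset by the element size, so a
+ * vector of element indices works directly.  Assuming `src64` is the
+ * caller's int64_t array:
+ *
+ *   uint64x2_t offs = { 0, 3 };  // element indices, scaled by 8 internally
+ *   int64x2_t v = vldrdq_gather_shifted_offset_s64(src64, offs);
+ */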
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16)))
+uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32)))
+uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8)))
+uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16)))
+uint16x8_t vmaxaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32)))
+uint32x4_t vmaxaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8)))
+uint8x16_t vmaxaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16)))
+uint16_t vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32)))
+uint32_t vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8)))
+uint8_t vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t vmaxavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16)))
+uint16_t vmaxavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t vmaxavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32)))
+uint32_t vmaxavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t vmaxavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8)))
+uint8_t vmaxavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16)))
+int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32)))
+int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8)))
+int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16)))
+uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32)))
+uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8)))
+uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t vmaxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16)))
+int16x8_t vmaxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t vmaxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32)))
+int32x4_t vmaxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t vmaxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8)))
+int8x16_t vmaxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16)))
+uint16x8_t vmaxq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32)))
+uint32x4_t vmaxq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8)))
+uint8x16_t vmaxq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16)))
+int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32)))
+int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8)))
+int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16)))
+uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32)))
+uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8)))
+uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
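+/* Editorial sketch (not part of the generated header): for the element-wise
+ * ops, the bare form is unpredicated, _m takes an extra leading `inactive`
+ * vector that supplies the lanes the predicate masks off, and _x leaves
+ * those lanes undefined (cheaper when they are never read):
+ *
+ *   int32x4_t m = vmaxq_m(fallback, a, b, p);  // masked lanes <- fallback
+ *   int32x4_t x = vmaxq_x(a, b, p);            // masked lanes undefined
+ */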
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16)))
+int16_t vmaxvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32)))
+int32_t vmaxvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8)))
+int8_t vmaxvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16)))
+uint16_t vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32)))
+uint32_t vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8)))
+uint8_t vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq(uint8_t, uint8x16_t);
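+/* Editorial sketch (not part of the generated header): vmaxvq reduces
+ * across the vector, folding its scalar first argument in as the running
+ * maximum, while vmaxavq above does the same over absolute values (hence
+ * its unsigned scalar); the vmin* families below mirror all of this.
+ *
+ *   int32_t m = vmaxvq(INT32_MIN, v);  // max of all four lanes of v
+ */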
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16)))
+uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32)))
+uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8)))
+uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t vminaq_s16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16)))
+uint16x8_t vminaq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t vminaq_s32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32)))
+uint32x4_t vminaq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t vminaq_s8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8)))
+uint8x16_t vminaq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16)))
+uint16_t vminavq_p(uint16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32)))
+uint32_t vminavq_p(uint32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8)))
+uint8_t vminavq_p(uint8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t vminavq_s16(uint16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16)))
+uint16_t vminavq(uint16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t vminavq_s32(uint32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32)))
+uint32_t vminavq(uint32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t vminavq_s8(uint8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8)))
+uint8_t vminavq(uint8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16)))
+int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32)))
+int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8)))
+int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16)))
+uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32)))
+uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8)))
+uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t vminq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16)))
+int16x8_t vminq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t vminq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32)))
+int32x4_t vminq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t vminq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8)))
+int8x16_t vminq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t vminq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16)))
+uint16x8_t vminq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t vminq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32)))
+uint32x4_t vminq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t vminq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8)))
+uint8x16_t vminq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16)))
+int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32)))
+int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8)))
+int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16)))
+uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32)))
+uint32x4_t vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8)))
+uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16)))
+int16_t vminvq_p(int16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32)))
+int32_t vminvq_p(int32_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8)))
+int8_t vminvq_p(int8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16)))
+uint16_t vminvq_p(uint16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32)))
+uint32_t vminvq_p(uint32_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8)))
+uint8_t vminvq_p(uint8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16)))
+int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32)))
+int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8)))
+int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16)))
+uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32)))
+uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8)))
+uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16)))
+int32_t vmladavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32)))
+int32_t vmladavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8)))
+int32_t vmladavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16)))
+uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32)))
+uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8)))
+uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16)))
+int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32)))
+int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8)))
+int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16)))
+int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32)))
+int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8)))
+int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16)))
+int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32)))
+int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8)))
+int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16)))
+uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32)))
+uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8)))
+uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t vmladavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16)))
+int32_t vmladavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t vmladavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32)))
+int32_t vmladavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t vmladavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8)))
+int32_t vmladavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t vmladavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16)))
+uint32_t vmladavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t vmladavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32)))
+uint32_t vmladavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t vmladavq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8)))
+uint32_t vmladavq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16)))
+int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32)))
+int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8)))
+int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t vmladavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16)))
+int32_t vmladavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t vmladavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32)))
+int32_t vmladavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t vmladavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8)))
+int32_t vmladavxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16)))
+int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32)))
+int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16)))
+uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32)))
+uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16)))
+int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32)))
+int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16)))
+uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32)))
+uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16)))
+int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32)))
+int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16)))
+int64_t vmlaldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32)))
+int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16)))
+int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32)))
+int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16)))
+uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32)))
+uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t vmlaldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16)))
+int64_t vmlaldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t vmlaldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32)))
+int64_t vmlaldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16)))
+uint64_t vmlaldavq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32)))
+uint64_t vmlaldavq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16)))
+int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32)))
+int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t vmlaldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16)))
+int64_t vmlaldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t vmlaldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32)))
+int64_t vmlaldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16)))
+int16x8_t vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32)))
+int32x4_t vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8)))
+int8x16_t vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16)))
+uint16x8_t vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32)))
+uint32x4_t vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8)))
+uint8x16_t vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t vmlaq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16)))
+int16x8_t vmlaq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t vmlaq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32)))
+int32x4_t vmlaq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t vmlaq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8)))
+int8x16_t vmlaq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16)))
+uint16x8_t vmlaq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32)))
+uint32x4_t vmlaq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8)))
+uint8x16_t vmlaq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16)))
+int16x8_t vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32)))
+int32x4_t vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8)))
+int8x16_t vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16)))
+uint16x8_t vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32)))
+uint32x4_t vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8)))
+uint8x16_t vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t vmlasq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16)))
+int16x8_t vmlasq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t vmlasq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32)))
+int32x4_t vmlasq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t vmlasq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8)))
+int8x16_t vmlasq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16)))
+uint16x8_t vmlasq(uint16x8_t, uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32)))
+uint32x4_t vmlasq(uint32x4_t, uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8)))
+uint8x16_t vmlasq(uint8x16_t, uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16)))
+int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32)))
+int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8)))
+int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16)))
+int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32)))
+int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8)))
+int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16)))
+int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32)))
+int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8)))
+int32_t vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16)))
+int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32)))
+int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8)))
+int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16)))
+int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32)))
+int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8)))
+int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t vmlsdavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16)))
+int32_t vmlsdavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t vmlsdavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32)))
+int32_t vmlsdavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t vmlsdavq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8)))
+int32_t vmlsdavq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16)))
+int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32)))
+int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8)))
+int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t vmlsdavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16)))
+int32_t vmlsdavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t vmlsdavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32)))
+int32_t vmlsdavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t vmlsdavxq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8)))
+int32_t vmlsdavxq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16)))
+int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32)))
+int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16)))
+int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32)))
+int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16)))
+int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32)))
+int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16)))
+int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32)))
+int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16)))
+int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32)))
+int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t vmlsldavq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16)))
+int64_t vmlsldavq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t vmlsldavq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32)))
+int64_t vmlsldavq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16)))
+int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32)))
+int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t vmlsldavxq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16)))
+int64_t vmlsldavxq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t vmlsldavxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32)))
+int64_t vmlsldavxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16)))
+int32x4_t vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8)))
+int16x8_t vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16)))
+uint32x4_t vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8)))
+uint16x8_t vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t vmovlbq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16)))
+int32x4_t vmovlbq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t vmovlbq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8)))
+int16x8_t vmovlbq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t vmovlbq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16)))
+uint32x4_t vmovlbq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t vmovlbq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8)))
+uint16x8_t vmovlbq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t vmovlbq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16)))
+int32x4_t vmovlbq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t vmovlbq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8)))
+int16x8_t vmovlbq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t vmovlbq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16)))
+uint32x4_t vmovlbq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t vmovlbq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8)))
+uint16x8_t vmovlbq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16)))
+int32x4_t vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8)))
+int16x8_t vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16)))
+uint32x4_t vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8)))
+uint16x8_t vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t vmovltq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16)))
+int32x4_t vmovltq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t vmovltq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8)))
+int16x8_t vmovltq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t vmovltq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16)))
+uint32x4_t vmovltq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t vmovltq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8)))
+uint16x8_t vmovltq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t vmovltq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16)))
+int32x4_t vmovltq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t vmovltq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8)))
+int16x8_t vmovltq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t vmovltq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16)))
+uint32x4_t vmovltq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t vmovltq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8)))
+uint16x8_t vmovltq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16)))
+int8x16_t vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32)))
+int16x8_t vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16)))
+uint8x16_t vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32)))
+uint16x8_t vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t vmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16)))
+int8x16_t vmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t vmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32)))
+int16x8_t vmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t vmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16)))
+uint8x16_t vmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t vmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32)))
+uint16x8_t vmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16)))
+int8x16_t vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32)))
+int16x8_t vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16)))
+uint8x16_t vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32)))
+uint16x8_t vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t vmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16)))
+int8x16_t vmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t vmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32)))
+int16x8_t vmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t vmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16)))
+uint8x16_t vmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t vmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32)))
+uint16x8_t vmovntq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16)))
+int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32)))
+int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8)))
+int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16)))
+uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32)))
+uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8)))
+uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t vmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16)))
+int16x8_t vmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t vmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32)))
+int32x4_t vmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t vmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8)))
+int8x16_t vmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16)))
+uint16x8_t vmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32)))
+uint32x4_t vmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8)))
+uint8x16_t vmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16)))
+int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32)))
+int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8)))
+int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16)))
+uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32)))
+uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8)))
+uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16)))
+int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32)))
+int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8)))
+int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16)))
+uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32)))
+uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8)))
+uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16)))
+int32x4_t vmullbq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32)))
+int64x2_t vmullbq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8)))
+int16x8_t vmullbq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16)))
+uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32)))
+uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8)))
+uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16)))
+int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32)))
+int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8)))
+int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16)))
+uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32)))
+uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8)))
+uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16)))
+uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8)))
+uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16)))
+uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8)))
+uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16)))
+uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8)))
+uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16)))
+int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32)))
+int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8)))
+int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16)))
+uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32)))
+uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8)))
+uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16)))
+int32x4_t vmulltq_int(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32)))
+int64x2_t vmulltq_int(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8)))
+int16x8_t vmulltq_int(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16)))
+uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32)))
+uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8)))
+uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16)))
+int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32)))
+int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8)))
+int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16)))
+uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32)))
+uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8)))
+uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16)))
+uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8)))
+uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16)))
+uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8)))
+uint16x8_t vmulltq_poly(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16)))
+uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8)))
+uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16)))
+int16x8_t vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32)))
+int32x4_t vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8)))
+int8x16_t vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16)))
+uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32)))
+uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8)))
+uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16)))
+int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32)))
+int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8)))
+int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16)))
+uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32)))
+uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8)))
+uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t vmulq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16)))
+int16x8_t vmulq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t vmulq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32)))
+int32x4_t vmulq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t vmulq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8)))
+int8x16_t vmulq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t vmulq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16)))
+uint16x8_t vmulq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t vmulq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32)))
+uint32x4_t vmulq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t vmulq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8)))
+uint8x16_t vmulq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t vmulq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16)))
+int16x8_t vmulq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t vmulq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32)))
+int32x4_t vmulq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t vmulq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8)))
+int8x16_t vmulq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16)))
+uint16x8_t vmulq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32)))
+uint32x4_t vmulq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8)))
+uint8x16_t vmulq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16)))
+int16x8_t vmulq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32)))
+int32x4_t vmulq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8)))
+int8x16_t vmulq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16)))
+uint16x8_t vmulq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32)))
+uint32x4_t vmulq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8)))
+uint8x16_t vmulq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16)))
+int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32)))
+int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8)))
+int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16)))
+uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32)))
+uint32x4_t vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8)))
+uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16)))
+int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32)))
+int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16)))
+uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32)))
+uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16)))
+int16x8_t vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32)))
+int32x4_t vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8)))
+int8x16_t vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16)))
+uint16x8_t vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32)))
+uint32x4_t vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8)))
+uint8x16_t vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16)))
+int16x8_t vmvnq_n_s16(int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32)))
+int32x4_t vmvnq_n_s32(int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16)))
+uint16x8_t vmvnq_n_u16(uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32)))
+uint32x4_t vmvnq_n_u32(uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t vmvnq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16)))
+int16x8_t vmvnq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t vmvnq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32)))
+int32x4_t vmvnq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t vmvnq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8)))
+int8x16_t vmvnq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t vmvnq_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16)))
+uint16x8_t vmvnq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t vmvnq_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32)))
+uint32x4_t vmvnq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t vmvnq_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8)))
+uint8x16_t vmvnq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16)))
+int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32)))
+int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16)))
+uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32)))
+uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t vmvnq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16)))
+int16x8_t vmvnq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t vmvnq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32)))
+int32x4_t vmvnq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t vmvnq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8)))
+int8x16_t vmvnq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t vmvnq_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16)))
+uint16x8_t vmvnq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t vmvnq_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32)))
+uint32x4_t vmvnq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t vmvnq_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8)))
+uint8x16_t vmvnq_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16)))
+int16x8_t vnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32)))
+int32x4_t vnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8)))
+int8x16_t vnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t vnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16)))
+int16x8_t vnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t vnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32)))
+int32x4_t vnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t vnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8)))
+int8x16_t vnegq(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t vnegq_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16)))
+int16x8_t vnegq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t vnegq_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32)))
+int32x4_t vnegq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t vnegq_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8)))
+int8x16_t vnegq_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16)))
+int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32)))
+int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8)))
+int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16)))
+uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32)))
+uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8)))
+uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t vornq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16)))
+int16x8_t vornq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t vornq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32)))
+int32x4_t vornq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t vornq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8)))
+int8x16_t vornq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t vornq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16)))
+uint16x8_t vornq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t vornq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32)))
+uint32x4_t vornq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t vornq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8)))
+uint8x16_t vornq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16)))
+int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32)))
+int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8)))
+int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16)))
+uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32)))
+uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8)))
+uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16)))
+int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32)))
+int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16)))
+uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32)))
+uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16)))
+int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32)))
+int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8)))
+int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16)))
+uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32)))
+uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8)))
+uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t vorrq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16)))
+int16x8_t vorrq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t vorrq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32)))
+int32x4_t vorrq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16)))
+uint16x8_t vorrq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32)))
+uint32x4_t vorrq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t vorrq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16)))
+int16x8_t vorrq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t vorrq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32)))
+int32x4_t vorrq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t vorrq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8)))
+int8x16_t vorrq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16)))
+uint16x8_t vorrq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32)))
+uint32x4_t vorrq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8)))
+uint8x16_t vorrq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16)))
+int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32)))
+int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8)))
+int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16)))
+uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32)))
+uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8)))
+uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot)))
+mve_pred16_t vpnot(mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16)))
+int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32)))
+int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64)))
+int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8)))
+int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16)))
+uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32)))
+uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64)))
+uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8)))
+uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t);
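+/* vqabsq (VQABS): lanewise saturating absolute value, signed types
+   only. The _m forms are merge-predicated: inactive lanes are taken
+   from the extra "inactive" vector passed as the first argument. */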
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16)))
+int16x8_t vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32)))
+int32x4_t vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8)))
+int8x16_t vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t vqabsq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16)))
+int16x8_t vqabsq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t vqabsq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32)))
+int32x4_t vqabsq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t vqabsq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8)))
+int8x16_t vqabsq(int8x16_t);
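+/* vqaddq (VQADD): lanewise saturating addition. The _n forms broadcast
+   a scalar as the second addend; the _m forms are merge-predicated. */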
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16)))
+int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32)))
+int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8)))
+int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16)))
+uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32)))
+uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8)))
+uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16)))
+int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32)))
+int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8)))
+int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16)))
+uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32)))
+uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8)))
+uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t vqaddq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16)))
+int16x8_t vqaddq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t vqaddq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32)))
+int32x4_t vqaddq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t vqaddq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8)))
+int8x16_t vqaddq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t vqaddq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16)))
+uint16x8_t vqaddq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t vqaddq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32)))
+uint32x4_t vqaddq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t vqaddq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8)))
+uint8x16_t vqaddq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t vqaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16)))
+int16x8_t vqaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t vqaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32)))
+int32x4_t vqaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t vqaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8)))
+int8x16_t vqaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16)))
+uint16x8_t vqaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32)))
+uint32x4_t vqaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8)))
+uint8x16_t vqaddq(uint8x16_t, uint8x16_t);
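+/* vqdmladhq / vqdmladhxq (VQDMLADH / VQDMLADHX): saturating doubling
+   multiply-add dual, returning the high half of each result; the X
+   form exchanges the operand pairing. Signed types only. */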
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16)))
+int16x8_t vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32)))
+int32x4_t vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8)))
+int8x16_t vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16)))
+int16x8_t vqdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32)))
+int32x4_t vqdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8)))
+int8x16_t vqdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16)))
+int16x8_t vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32)))
+int32x4_t vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8)))
+int8x16_t vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16)))
+int16x8_t vqdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32)))
+int32x4_t vqdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8)))
+int8x16_t vqdmladhxq(int8x16_t, int8x16_t, int8x16_t);
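+/* vqdmlahq / vqdmlashq (VQDMLAH / VQDMLASH): saturating doubling
+   multiply-accumulate operations taking a scalar operand, signed
+   types only. */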
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16)))
+int16x8_t vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32)))
+int32x4_t vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8)))
+int8x16_t vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16)))
+int16x8_t vqdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32)))
+int32x4_t vqdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8)))
+int8x16_t vqdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16)))
+int16x8_t vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32)))
+int32x4_t vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8)))
+int8x16_t vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16)))
+int16x8_t vqdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32)))
+int32x4_t vqdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8)))
+int8x16_t vqdmlashq(int8x16_t, int8x16_t, int8_t);
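+/* vqdmlsdhq / vqdmlsdhxq (VQDMLSDH / VQDMLSDHX): the subtracting
+   counterparts of VQDMLADH / VQDMLADHX above. */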
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16)))
+int16x8_t vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32)))
+int32x4_t vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8)))
+int8x16_t vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16)))
+int16x8_t vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32)))
+int32x4_t vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8)))
+int8x16_t vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16)))
+int16x8_t vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32)))
+int32x4_t vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8)))
+int8x16_t vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16)))
+int16x8_t vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32)))
+int32x4_t vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8)))
+int8x16_t vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
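+/* vqdmulhq (VQDMULH): saturating doubling multiply returning the high
+   half of each result; the _n forms take a broadcast scalar
+   multiplier. */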
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16)))
+int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32)))
+int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8)))
+int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16)))
+int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32)))
+int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8)))
+int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t vqdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16)))
+int16x8_t vqdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t vqdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32)))
+int32x4_t vqdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t vqdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8)))
+int8x16_t vqdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16)))
+int16x8_t vqdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32)))
+int32x4_t vqdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8)))
+int8x16_t vqdmulhq(int8x16_t, int8x16_t);
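+/* vqdmullbq / vqdmulltq (VQDMULLB / VQDMULLT): saturating doubling
+   multiply long on the bottom (even-numbered) or top (odd-numbered)
+   lanes, widening the element type; hence the double-width result
+   and accumulator vectors. */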
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16)))
+int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32)))
+int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16)))
+int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32)))
+int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t vqdmullbq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16)))
+int32x4_t vqdmullbq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t vqdmullbq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32)))
+int64x2_t vqdmullbq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t vqdmullbq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16)))
+int32x4_t vqdmullbq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t vqdmullbq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32)))
+int64x2_t vqdmullbq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16)))
+int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32)))
+int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16)))
+int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32)))
+int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t vqdmulltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16)))
+int32x4_t vqdmulltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t vqdmulltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32)))
+int64x2_t vqdmulltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t vqdmulltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16)))
+int32x4_t vqdmulltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t vqdmulltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32)))
+int64x2_t vqdmulltq(int32x4_t, int32x4_t);
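+/* vqmovnbq / vqmovntq (VQMOVNB / VQMOVNT): saturating narrowing moves
+   that write the narrowed elements into the bottom (B) or top (T)
+   half-lanes of the result, keeping the remaining lanes from the
+   first argument. */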
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16)))
+int8x16_t vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32)))
+int16x8_t vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16)))
+uint8x16_t vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32)))
+uint16x8_t vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t vqmovnbq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16)))
+int8x16_t vqmovnbq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t vqmovnbq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32)))
+int16x8_t vqmovnbq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t vqmovnbq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16)))
+uint8x16_t vqmovnbq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t vqmovnbq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32)))
+uint16x8_t vqmovnbq(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16)))
+int8x16_t vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32)))
+int16x8_t vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16)))
+uint8x16_t vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32)))
+uint16x8_t vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t vqmovntq_s16(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16)))
+int8x16_t vqmovntq(int8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t vqmovntq_s32(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32)))
+int16x8_t vqmovntq(int16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t vqmovntq_u16(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16)))
+uint8x16_t vqmovntq(uint8x16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t vqmovntq_u32(uint16x8_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32)))
+uint16x8_t vqmovntq(uint16x8_t, uint32x4_t);
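+/* vqmovunbq / vqmovuntq (VQMOVUNB / VQMOVUNT): as above, but narrow
+   signed sources to unsigned results with unsigned saturation. */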
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16)))
+uint8x16_t vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32)))
+uint16x8_t vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t vqmovunbq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16)))
+uint8x16_t vqmovunbq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t vqmovunbq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32)))
+uint16x8_t vqmovunbq(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16)))
+uint8x16_t vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32)))
+uint16x8_t vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t vqmovuntq_s16(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16)))
+uint8x16_t vqmovuntq(uint8x16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t vqmovuntq_s32(uint16x8_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32)))
+uint16x8_t vqmovuntq(uint16x8_t, int32x4_t);
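+/* vqnegq (VQNEG): lanewise saturating negation, signed types only. */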
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16)))
+int16x8_t vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32)))
+int32x4_t vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8)))
+int8x16_t vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t vqnegq_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16)))
+int16x8_t vqnegq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t vqnegq_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32)))
+int32x4_t vqnegq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t vqnegq_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8)))
+int8x16_t vqnegq(int8x16_t);
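+/* The vqrd* families that follow (VQRDMLADH, VQRDMLADHX, VQRDMLAH,
+   VQRDMLASH, VQRDMLSDH, VQRDMLSDHX, VQRDMULH) are the rounding
+   counterparts of the VQDM* operations declared above. */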
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16)))
+int16x8_t vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32)))
+int32x4_t vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8)))
+int8x16_t vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16)))
+int16x8_t vqrdmladhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32)))
+int32x4_t vqrdmladhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8)))
+int8x16_t vqrdmladhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16)))
+int16x8_t vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32)))
+int32x4_t vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8)))
+int8x16_t vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16)))
+int16x8_t vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32)))
+int32x4_t vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8)))
+int8x16_t vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16)))
+int16x8_t vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32)))
+int32x4_t vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8)))
+int8x16_t vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16)))
+int16x8_t vqrdmlahq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32)))
+int32x4_t vqrdmlahq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8)))
+int8x16_t vqrdmlahq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16)))
+int16x8_t vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32)))
+int32x4_t vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8)))
+int8x16_t vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16)))
+int16x8_t vqrdmlashq(int16x8_t, int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32)))
+int32x4_t vqrdmlashq(int32x4_t, int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8)))
+int8x16_t vqrdmlashq(int8x16_t, int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16)))
+int16x8_t vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32)))
+int32x4_t vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8)))
+int8x16_t vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16)))
+int16x8_t vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32)))
+int32x4_t vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8)))
+int8x16_t vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16)))
+int16x8_t vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32)))
+int32x4_t vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8)))
+int8x16_t vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16)))
+int16x8_t vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32)))
+int32x4_t vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8)))
+int8x16_t vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16)))
+int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32)))
+int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8)))
+int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16)))
+int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32)))
+int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8)))
+int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t vqrdmulhq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16)))
+int16x8_t vqrdmulhq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t vqrdmulhq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32)))
+int32x4_t vqrdmulhq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t vqrdmulhq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8)))
+int8x16_t vqrdmulhq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16)))
+int16x8_t vqrdmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32)))
+int32x4_t vqrdmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8)))
+int8x16_t vqrdmulhq(int8x16_t, int8x16_t);
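+/*
+ * Illustrative usage sketch (comment only): vqrdmulhq is a saturating
+ * rounding doubling multiply returning the high half; the overloaded name
+ * resolves on argument types, so a scalar second operand picks the _n form.
+ *
+ *   int16x8_t a = vdupq_n_s16(0x4000), b = vdupq_n_s16(0x2000);
+ *   int16x8_t r = vqrdmulhq(a, b);           // vqrdmulhq_s16
+ *   int16x8_t s = vqrdmulhq(a, (int16_t)3);  // vqrdmulhq_n_s16
+ */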
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16)))
+int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32)))
+int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8)))
+int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16)))
+uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32)))
+uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8)))
+uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16)))
+int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32)))
+int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8)))
+int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16)))
+uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32)))
+uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8)))
+uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t vqrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16)))
+int16x8_t vqrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t vqrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32)))
+int32x4_t vqrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t vqrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8)))
+int8x16_t vqrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16)))
+uint16x8_t vqrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32)))
+uint32x4_t vqrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8)))
+uint8x16_t vqrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t vqrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16)))
+int16x8_t vqrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t vqrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32)))
+int32x4_t vqrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t vqrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8)))
+int8x16_t vqrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16)))
+uint16x8_t vqrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32)))
+uint32x4_t vqrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8)))
+uint8x16_t vqrshlq(uint8x16_t, int8x16_t);
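+/*
+ * Illustrative usage sketch (comment only): vqrshlq rounds and saturates a
+ * left shift; the vector form shifts each lane by a per-lane signed amount,
+ * the _n form applies one scalar shift, and negative shifts move right.
+ *
+ *   uint8x16_t v = vdupq_n_u8(200);
+ *   uint8x16_t r = vqrshlq(v, 1);   // vqrshlq_n_u8; 400 saturates to 255
+ */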
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16)))
+int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32)))
+int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16)))
+uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32)))
+uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16)))
+int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32)))
+int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16)))
+uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32)))
+uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16)))
+int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32)))
+int16x8_t vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16)))
+uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32)))
+uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16)))
+int8x16_t vqrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32)))
+int16x8_t vqrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16)))
+uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32)))
+uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int);
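+/*
+ * Illustrative usage sketch (comment only): the rounding-narrow b/t pair
+ * writes the bottom (b) or top (t) half-lanes of the destination, so
+ * narrowing two wide vectors interleaves both calls. lo and hi below are
+ * hypothetical int32x4_t inputs.
+ *
+ *   int16x8_t n = vdupq_n_s16(0);
+ *   n = vqrshrnbq(n, lo, 8);  // even lanes from lo
+ *   n = vqrshrntq(n, hi, 8);  // odd lanes from hi
+ */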
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16)))
+uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32)))
+uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16)))
+uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32)))
+uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16)))
+uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32)))
+uint16x8_t vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16)))
+uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32)))
+uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int);
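+/*
+ * Illustrative note (comment only): the _shrun variants narrow signed
+ * inputs with unsigned saturation, clamping negative lanes to zero. w is a
+ * hypothetical int16x8_t input.
+ *
+ *   uint8x16_t u = vqrshrunbq(vdupq_n_u8(0), w, 4);
+ */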
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16)))
+int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32)))
+int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8)))
+int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16)))
+uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32)))
+uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8)))
+uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16)))
+int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32)))
+int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8)))
+int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16)))
+uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32)))
+uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8)))
+uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16)))
+int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32)))
+int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8)))
+int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16)))
+uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32)))
+uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8)))
+uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t vqshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16)))
+int16x8_t vqshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t vqshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32)))
+int32x4_t vqshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t vqshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8)))
+int8x16_t vqshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t vqshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16)))
+uint16x8_t vqshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t vqshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32)))
+uint32x4_t vqshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t vqshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8)))
+uint8x16_t vqshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t vqshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16)))
+int16x8_t vqshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t vqshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32)))
+int32x4_t vqshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t vqshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8)))
+int8x16_t vqshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16)))
+uint16x8_t vqshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32)))
+uint32x4_t vqshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8)))
+uint8x16_t vqshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t vqshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16)))
+int16x8_t vqshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t vqshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32)))
+int32x4_t vqshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t vqshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8)))
+int8x16_t vqshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16)))
+uint16x8_t vqshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32)))
+uint32x4_t vqshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8)))
+uint8x16_t vqshlq(uint8x16_t, int8x16_t);
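+/*
+ * Illustrative usage sketch (comment only): vqshlq offers two scalar forms
+ * besides the per-lane vector shift: _n takes a compile-time immediate,
+ * _r takes a runtime scalar applied to all lanes.
+ *
+ *   int32x4_t a  = vdupq_n_s32(1 << 30);
+ *   int32x4_t r1 = vqshlq_n(a, 2);   // immediate; saturates to INT32_MAX
+ *   int32x4_t r2 = vqshlq_r(a, -1);  // runtime scalar; negative shifts right
+ */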
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16)))
+uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32)))
+uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8)))
+uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t vqshluq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16)))
+uint16x8_t vqshluq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t vqshluq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32)))
+uint32x4_t vqshluq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t vqshluq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8)))
+uint8x16_t vqshluq(int8x16_t, int);
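+/*
+ * Illustrative note (comment only): vqshluq left-shifts signed lanes and
+ * saturates into the unsigned result type, so negative lanes clamp to 0.
+ *
+ *   uint16x8_t u = vqshluq(vdupq_n_s16(-5), 3);  // vqshluq_n_s16; all zeros
+ */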
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16)))
+int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32)))
+int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16)))
+uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32)))
+uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16)))
+int8x16_t vqshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32)))
+int16x8_t vqshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16)))
+uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32)))
+uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16)))
+int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32)))
+int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16)))
+uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32)))
+uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16)))
+int8x16_t vqshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32)))
+int16x8_t vqshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16)))
+uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32)))
+uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16)))
+uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32)))
+uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16)))
+uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32)))
+uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16)))
+uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32)))
+uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16)))
+uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32)))
+uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16)))
+int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32)))
+int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8)))
+int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16)))
+uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32)))
+uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8)))
+uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16)))
+int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32)))
+int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8)))
+int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16)))
+uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32)))
+uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8)))
+uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t vqsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16)))
+int16x8_t vqsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t vqsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32)))
+int32x4_t vqsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t vqsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8)))
+int8x16_t vqsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t vqsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16)))
+uint16x8_t vqsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t vqsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32)))
+uint32x4_t vqsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t vqsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8)))
+uint8x16_t vqsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t vqsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16)))
+int16x8_t vqsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t vqsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32)))
+int32x4_t vqsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t vqsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8)))
+int8x16_t vqsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16)))
+uint16x8_t vqsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32)))
+uint32x4_t vqsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8)))
+uint8x16_t vqsubq(uint8x16_t, uint8x16_t);
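+/*
+ * Illustrative usage sketch (comment only): vqsubq is a lane-wise
+ * saturating subtract; the _m predicated forms take an inactive-value
+ * vector plus a predicate, and inactive lanes copy from that first
+ * argument. The mask value below is only an example.
+ *
+ *   uint8x16_t a = vdupq_n_u8(10), b = vdupq_n_u8(20);
+ *   uint8x16_t r = vqsubq(a, b);          // 10 - 20 saturates to 0
+ *   mve_pred16_t p = 0x00FF;              // low 8 byte lanes active
+ *   uint8x16_t q = vqsubq_m(a, a, b, p);  // inactive lanes keep a
+ */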
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8(uint8x16_t);
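+/*
+ * Illustrative note (comment only): vreinterpretq_* reinterprets the
+ * 128-bit register without changing any bits.
+ *
+ *   uint8x16_t bytes = vdupq_n_u8(0x80);
+ *   int8x16_t  s = vreinterpretq_s8(bytes);  // same bits, signed lanes
+ */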
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8)))
+int8x16_t vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8)))
+uint8x16_t vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t vrev16q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8)))
+int8x16_t vrev16q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t vrev16q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8)))
+uint8x16_t vrev16q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t vrev16q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8)))
+int8x16_t vrev16q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t vrev16q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8)))
+uint8x16_t vrev16q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16)))
+int16x8_t vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8)))
+int8x16_t vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16)))
+uint16x8_t vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8)))
+uint8x16_t vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t vrev32q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16)))
+int16x8_t vrev32q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t vrev32q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8)))
+int8x16_t vrev32q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t vrev32q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16)))
+uint16x8_t vrev32q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t vrev32q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8)))
+uint8x16_t vrev32q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t vrev32q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16)))
+int16x8_t vrev32q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t vrev32q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8)))
+int8x16_t vrev32q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t vrev32q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16)))
+uint16x8_t vrev32q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t vrev32q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8)))
+uint8x16_t vrev32q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16)))
+int16x8_t vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32)))
+int32x4_t vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8)))
+int8x16_t vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16)))
+uint16x8_t vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32)))
+uint32x4_t vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8)))
+uint8x16_t vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t vrev64q_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16)))
+int16x8_t vrev64q(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t vrev64q_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32)))
+int32x4_t vrev64q(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t vrev64q_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8)))
+int8x16_t vrev64q(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t vrev64q_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16)))
+uint16x8_t vrev64q(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t vrev64q_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32)))
+uint32x4_t vrev64q(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t vrev64q_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8)))
+uint8x16_t vrev64q(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t vrev64q_x_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16)))
+int16x8_t vrev64q_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t vrev64q_x_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32)))
+int32x4_t vrev64q_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t vrev64q_x_s8(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8)))
+int8x16_t vrev64q_x(int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t vrev64q_x_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16)))
+uint16x8_t vrev64q_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t vrev64q_x_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32)))
+uint32x4_t vrev64q_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t vrev64q_x_u8(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8)))
+uint8x16_t vrev64q_x(uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16)))
+int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32)))
+int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8)))
+int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16)))
+uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32)))
+uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8)))
+uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t vrhaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16)))
+int16x8_t vrhaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t vrhaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32)))
+int32x4_t vrhaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t vrhaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8)))
+int8x16_t vrhaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16)))
+uint16x8_t vrhaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32)))
+uint32x4_t vrhaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8)))
+uint8x16_t vrhaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16)))
+int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32)))
+int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8)))
+int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16)))
+uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32)))
+uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8)))
+uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32)))
+int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32)))
+uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32)))
+int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32)))
+uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32)))
+int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32)))
+int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32)))
+int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32)))
+uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32)))
+int64_t vrmlaldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32)))
+uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32)))
+int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32)))
+int64_t vrmlaldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32)))
+int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32)))
+int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32)))
+int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32)))
+int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32)))
+int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32)))
+int64_t vrmlsldavhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32)))
+int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32)))
+int64_t vrmlsldavhxq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16)))
+int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32)))
+int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8)))
+int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16)))
+uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32)))
+uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8)))
+uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t vrmulhq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16)))
+int16x8_t vrmulhq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t vrmulhq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32)))
+int32x4_t vrmulhq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t vrmulhq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8)))
+int8x16_t vrmulhq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16)))
+uint16x8_t vrmulhq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32)))
+uint32x4_t vrmulhq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8)))
+uint8x16_t vrmulhq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16)))
+int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32)))
+int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8)))
+int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16)))
+uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32)))
+uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8)))
+uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16)))
+int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32)))
+int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8)))
+int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16)))
+uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32)))
+uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8)))
+uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16)))
+int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32)))
+int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8)))
+int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16)))
+uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32)))
+uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8)))
+uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t vrshlq_n_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16)))
+int16x8_t vrshlq(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t vrshlq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32)))
+int32x4_t vrshlq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t vrshlq_n_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8)))
+int8x16_t vrshlq(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16)))
+uint16x8_t vrshlq(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32)))
+uint32x4_t vrshlq(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8)))
+uint8x16_t vrshlq(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t vrshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16)))
+int16x8_t vrshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t vrshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32)))
+int32x4_t vrshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t vrshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8)))
+int8x16_t vrshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16)))
+uint16x8_t vrshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32)))
+uint32x4_t vrshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8)))
+uint8x16_t vrshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16)))
+int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32)))
+int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8)))
+int8x16_t vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16)))
+uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32)))
+uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8)))
+uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16)))
+int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32)))
+int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16)))
+uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32)))
+uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16)))
+int8x16_t vrshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32)))
+int16x8_t vrshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16)))
+uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32)))
+uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16)))
+int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32)))
+int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16)))
+uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32)))
+uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16)))
+int8x16_t vrshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32)))
+int16x8_t vrshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16)))
+uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32)))
+uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16)))
+int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32)))
+int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8)))
+int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16)))
+uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32)))
+uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8)))
+uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t vrshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16)))
+int16x8_t vrshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t vrshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32)))
+int32x4_t vrshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t vrshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8)))
+int8x16_t vrshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t vrshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16)))
+uint16x8_t vrshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t vrshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32)))
+uint32x4_t vrshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t vrshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8)))
+uint8x16_t vrshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16)))
+int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32)))
+int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8)))
+int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16)))
+uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32)))
+uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8)))
+uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32)))
+int32x4_t vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32)))
+uint32x4_t vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t vsbciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32)))
+int32x4_t vsbciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32)))
+uint32x4_t vsbciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32)))
+int32x4_t vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32)))
+uint32x4_t vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t vsbcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32)))
+int32x4_t vsbcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32)))
+uint32x4_t vsbcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16)))
+int16x8_t vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32)))
+int32x4_t vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8)))
+int8x16_t vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16)))
+uint16x8_t vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32)))
+uint32x4_t vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8)))
+uint8x16_t vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t vshlcq_s16(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16)))
+int16x8_t vshlcq(int16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t vshlcq_s32(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32)))
+int32x4_t vshlcq(int32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t vshlcq_s8(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8)))
+int8x16_t vshlcq(int8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t vshlcq_u16(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16)))
+uint16x8_t vshlcq(uint16x8_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t vshlcq_u32(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32)))
+uint32x4_t vshlcq(uint32x4_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t vshlcq_u8(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8)))
+uint8x16_t vshlcq(uint8x16_t, uint32_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16)))
+int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8)))
+int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16)))
+uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8)))
+uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t vshllbq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16)))
+int32x4_t vshllbq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t vshllbq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8)))
+int16x8_t vshllbq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t vshllbq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16)))
+uint32x4_t vshllbq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t vshllbq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8)))
+uint16x8_t vshllbq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16)))
+int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8)))
+int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16)))
+uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8)))
+uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16)))
+int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8)))
+int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16)))
+uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8)))
+uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t vshlltq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16)))
+int32x4_t vshlltq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t vshlltq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8)))
+int16x8_t vshlltq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t vshlltq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16)))
+uint32x4_t vshlltq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t vshlltq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8)))
+uint16x8_t vshlltq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16)))
+int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8)))
+int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16)))
+uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8)))
+uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16)))
+int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32)))
+int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8)))
+int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16)))
+uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32)))
+uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8)))
+uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16)))
+int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32)))
+int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8)))
+int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16)))
+uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32)))
+uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8)))
+uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16)))
+int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32)))
+int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8)))
+int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16)))
+uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32)))
+uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8)))
+uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t vshlq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16)))
+int16x8_t vshlq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t vshlq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32)))
+int32x4_t vshlq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t vshlq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8)))
+int8x16_t vshlq_n(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t vshlq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16)))
+uint16x8_t vshlq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t vshlq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32)))
+uint32x4_t vshlq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t vshlq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8)))
+uint8x16_t vshlq_n(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t vshlq_r_s16(int16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16)))
+int16x8_t vshlq_r(int16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t vshlq_r_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32)))
+int32x4_t vshlq_r(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t vshlq_r_s8(int8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8)))
+int8x16_t vshlq_r(int8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t vshlq_r_u16(uint16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16)))
+uint16x8_t vshlq_r(uint16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t vshlq_r_u32(uint32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32)))
+uint32x4_t vshlq_r(uint32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t vshlq_r_u8(uint8x16_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8)))
+uint8x16_t vshlq_r(uint8x16_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t vshlq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16)))
+int16x8_t vshlq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t vshlq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32)))
+int32x4_t vshlq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t vshlq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8)))
+int8x16_t vshlq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t vshlq_u16(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16)))
+uint16x8_t vshlq(uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t vshlq_u32(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32)))
+uint32x4_t vshlq(uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t vshlq_u8(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8)))
+uint8x16_t vshlq(uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16)))
+int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32)))
+int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8)))
+int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16)))
+uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32)))
+uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8)))
+uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16)))
+int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32)))
+int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8)))
+int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16)))
+uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32)))
+uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8)))
+uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16)))
+int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32)))
+int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16)))
+uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32)))
+uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16)))
+int8x16_t vshrnbq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32)))
+int16x8_t vshrnbq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16)))
+uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32)))
+uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16)))
+int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32)))
+int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16)))
+uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32)))
+uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16)))
+int8x16_t vshrntq(int8x16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32)))
+int16x8_t vshrntq(int16x8_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16)))
+uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32)))
+uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16)))
+int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32)))
+int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8)))
+int8x16_t vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16)))
+uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32)))
+uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8)))
+uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t vshrq_n_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16)))
+int16x8_t vshrq(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t vshrq_n_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32)))
+int32x4_t vshrq(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t vshrq_n_s8(int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8)))
+int8x16_t vshrq(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t vshrq_n_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16)))
+uint16x8_t vshrq(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t vshrq_n_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32)))
+uint32x4_t vshrq(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t vshrq_n_u8(uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8)))
+uint8x16_t vshrq(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16)))
+int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32)))
+int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8)))
+int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16)))
+uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32)))
+uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8)))
+uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16)))
+int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32)))
+int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8)))
+int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16)))
+uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32)))
+uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8)))
+uint8x16_t vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16)))
+int16x8_t vsliq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32)))
+int32x4_t vsliq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8)))
+int8x16_t vsliq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t vsliq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16)))
+uint16x8_t vsliq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32)))
+uint32x4_t vsliq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8)))
+uint8x16_t vsliq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16)))
+int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32)))
+int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8)))
+int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16)))
+uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32)))
+uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8)))
+uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16)))
+int16x8_t vsriq(int16x8_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32)))
+int32x4_t vsriq(int32x4_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8)))
+int8x16_t vsriq(int8x16_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t vsriq_n_u16(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16)))
+uint16x8_t vsriq(uint16x8_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32)))
+uint32x4_t vsriq(uint32x4_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8)))
+uint8x16_t vsriq(uint8x16_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t vsubq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16)))
+int16x8_t vsubq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t vsubq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32)))
+int32x4_t vsubq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t vsubq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8)))
+int8x16_t vsubq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t vsubq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t vsubq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t vsubq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16)))
+int16x8_t vsubq_x(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32)))
+int32x4_t vsubq_x(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8)))
+int8x16_t vsubq_x(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16)))
+uint16x8_t vsubq_x(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32)))
+uint32x4_t vsubq_x(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8)))
+uint8x16_t vsubq_x(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16)))
+int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32)))
+int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8)))
+int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16)))
+uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32)))
+uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8)))
+uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t vuninitializedq(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t vuninitializedq(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t vuninitializedq(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t vuninitializedq(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t vuninitializedq_u8();
+
+#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t vabdq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16)))
+float16x8_t vabdq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t vabdq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32)))
+float32x4_t vabdq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16)))
+float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32)))
+float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16)))
+float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32)))
+float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t vabsq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16)))
+float16x8_t vabsq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t vabsq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32)))
+float32x4_t vabsq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16)))
+float16x8_t vabsq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32)))
+float32x4_t vabsq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t vabsq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16)))
+float16x8_t vabsq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t vabsq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32)))
+float32x4_t vabsq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t vaddq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16)))
+float16x8_t vaddq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t vaddq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32)))
+float32x4_t vaddq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16)))
+float16x8_t vaddq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32)))
+float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16)))
+float16x8_t vaddq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32)))
+float32x4_t vaddq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t vandq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16)))
+float16x8_t vandq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t vandq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32)))
+float32x4_t vandq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16)))
+float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32)))
+float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16)))
+float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32)))
+float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t vbicq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16)))
+float16x8_t vbicq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t vbicq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32)))
+float32x4_t vbicq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16)))
+float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32)))
+float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16)))
+float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32)))
+float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16)))
+float16x8_t vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32)))
+float32x4_t vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t vbrsrq_n_f16(float16x8_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16)))
+float16x8_t vbrsrq(float16x8_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t vbrsrq_n_f32(float32x4_t, int32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32)))
+float32x4_t vbrsrq(float32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16)))
+float16x8_t vbrsrq_x(float16x8_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32)))
+float32x4_t vbrsrq_x(float32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t vcaddq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16)))
+float16x8_t vcaddq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32)))
+float32x4_t vcaddq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16)))
+float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32)))
+float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16)))
+float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32)))
+float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16)))
+float16x8_t vcaddq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32)))
+float32x4_t vcaddq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16)))
+float16x8_t vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32)))
+float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16)))
+float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32)))
+float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16)))
+float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32)))
+float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16)))
+float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32)))
+float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16)))
+float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32)))
+float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16)))
+float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32)))
+float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16)))
+float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32)))
+float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16)))
+float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32)))
+float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16)))
+float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32)))
+float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16)))
+float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32)))
+float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t vcmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16)))
+float16x8_t vcmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t vcmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32)))
+float32x4_t vcmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16)))
+float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32)))
+float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16)))
+float16x8_t vcmulq_rot180(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32)))
+float32x4_t vcmulq_rot180(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16)))
+float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32)))
+float32x4_t vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16)))
+float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32)))
+float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16)))
+float16x8_t vcmulq_rot270(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32)))
+float32x4_t vcmulq_rot270(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16)))
+float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32)))
+float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16)))
+float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32)))
+float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16)))
+float16x8_t vcmulq_rot90(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t vcmulq_rot90_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32)))
+float32x4_t vcmulq_rot90(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16)))
+float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32)))
+float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16)))
+float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32)))
+float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16)))
+float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32)))
+float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16)))
+int16x8_t vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32)))
+int32x4_t vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16)))
+uint16x8_t vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32)))
+uint32x4_t vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16)))
+int16x8_t vcvtaq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32)))
+int32x4_t vcvtaq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16)))
+uint16x8_t vcvtaq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32)))
+uint32x4_t vcvtaq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16)))
+int16x8_t vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32)))
+int32x4_t vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16)))
+uint16x8_t vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32)))
+uint32x4_t vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16)))
+float32x4_t vcvtbq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16)))
+float32x4_t vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16)))
+float32x4_t vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16)))
+int16x8_t vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32)))
+int32x4_t vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16)))
+uint16x8_t vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32)))
+uint32x4_t vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16)))
+int16x8_t vcvtmq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32)))
+int32x4_t vcvtmq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16)))
+uint16x8_t vcvtmq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32)))
+uint32x4_t vcvtmq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16)))
+int16x8_t vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32)))
+int32x4_t vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16)))
+uint16x8_t vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32)))
+uint32x4_t vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16)))
+int16x8_t vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32)))
+int32x4_t vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16)))
+uint16x8_t vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32)))
+uint32x4_t vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16)))
+int16x8_t vcvtnq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32)))
+int32x4_t vcvtnq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16)))
+uint16x8_t vcvtnq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32)))
+uint32x4_t vcvtnq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16)))
+int16x8_t vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32)))
+int32x4_t vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16)))
+uint16x8_t vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32)))
+uint32x4_t vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16)))
+int16x8_t vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32)))
+int32x4_t vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16)))
+uint16x8_t vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32)))
+uint32x4_t vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16)))
+int16x8_t vcvtpq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32)))
+int32x4_t vcvtpq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16)))
+uint16x8_t vcvtpq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32)))
+uint32x4_t vcvtpq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16)))
+int16x8_t vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32)))
+int32x4_t vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16)))
+uint16x8_t vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32)))
+uint32x4_t vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t vcvtq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16)))
+float16x8_t vcvtq(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t vcvtq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16)))
+float16x8_t vcvtq(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t vcvtq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32)))
+float32x4_t vcvtq(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t vcvtq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32)))
+float32x4_t vcvtq(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16)))
+float16x8_t vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16)))
+float16x8_t vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32)))
+float32x4_t vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32)))
+float32x4_t vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16)))
+float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16)))
+float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32)))
+float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32)))
+float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16)))
+int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32)))
+int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16)))
+uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32)))
+uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16)))
+int16x8_t vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32)))
+int32x4_t vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16)))
+uint16x8_t vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32)))
+uint32x4_t vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t vcvtq_n_f16_s16(int16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16)))
+float16x8_t vcvtq_n(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t vcvtq_n_f16_u16(uint16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16)))
+float16x8_t vcvtq_n(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t vcvtq_n_f32_s32(int32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32)))
+float32x4_t vcvtq_n(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t vcvtq_n_f32_u32(uint32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32)))
+float32x4_t vcvtq_n(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16)))
+int16x8_t vcvtq_n_s16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32)))
+int32x4_t vcvtq_n_s32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16)))
+uint16x8_t vcvtq_n_u16_f16(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32)))
+uint32x4_t vcvtq_n_u32_f32(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16)))
+int16x8_t vcvtq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32)))
+int32x4_t vcvtq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16)))
+uint16x8_t vcvtq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32)))
+uint32x4_t vcvtq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t vcvtq_x_f16_s16(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16)))
+float16x8_t vcvtq_x(int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16)))
+float16x8_t vcvtq_x(uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t vcvtq_x_f32_s32(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32)))
+float32x4_t vcvtq_x(int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32)))
+float32x4_t vcvtq_x(uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16)))
+float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16)))
+float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32)))
+float32x4_t vcvtq_x_n(int32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32)))
+float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16)))
+int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32)))
+int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16)))
+uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32)))
+uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16)))
+int16x8_t vcvtq_x_s16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32)))
+int32x4_t vcvtq_x_s32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16)))
+uint16x8_t vcvtq_x_u16_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32)))
+uint32x4_t vcvtq_x_u32_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16)))
+float32x4_t vcvttq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16)))
+float32x4_t vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16)))
+float32x4_t vcvttq_x_f32_f16(float16x8_t, mve_pred16_t);
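+// Usage sketch (illustrative comment only, not part of the upstream header):
+// vcvtq converts between integer and float lanes; the _n forms treat the
+// integer input as fixed-point with the given number of fractional bits (a
+// compile-time constant in [1, element width]). Assuming Q8.8 input samples:
+//   int16x8_t q88  = vld1q_s16(src);             // eight Q8.8 samples
+//   float16x8_t f  = vcvtq_n_f16_s16(q88, 8);    // scale by 2^-8
+//   int16x8_t back = vcvtq_n_s16_f16(f, 8);      // round-trip to Q8.8
+// vcvttq_f32_f16 widens the odd (top) f16 lanes to f32; the even (bottom)
+// lanes are handled by the matching vcvtbq intrinsics declared elsewhere in
+// this header.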
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16)))
+float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32)))
+float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16)))
+float16x8_t vdupq_n_f16(float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32)))
+float32x4_t vdupq_n_f32(float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16)))
+float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32)))
+float32x4_t vdupq_x_n_f32(float32_t, mve_pred16_t);
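+// Usage sketch (comment only): vdupq_n broadcasts a scalar to every lane;
+// the _m form keeps lanes from the `inactive` first operand where the
+// predicate bit is clear, and the _x form leaves those lanes undefined:
+//   float32x4_t ones = vdupq_n_f32(1.0f);
+//   mve_pred16_t p   = vctp32q(3);                   // first three lanes active
+//   float32x4_t mix  = vdupq_m_n_f32(ones, 2.0f, p); // {2, 2, 2, 1}
+// vctp32q is declared elsewhere in this header.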
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t veorq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16)))
+float16x8_t veorq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t veorq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32)))
+float32x4_t veorq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16)))
+float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32)))
+float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16)))
+float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32)))
+float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t vfmaq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16)))
+float16x8_t vfmaq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t vfmaq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32)))
+float32x4_t vfmaq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16)))
+float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32)))
+float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16)))
+float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32)))
+float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t vfmaq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16)))
+float16x8_t vfmaq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t vfmaq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32)))
+float32x4_t vfmaq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16)))
+float16x8_t vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32)))
+float32x4_t vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t vfmasq_n_f16(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16)))
+float16x8_t vfmasq(float16x8_t, float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t vfmasq_n_f32(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32)))
+float32x4_t vfmasq(float32x4_t, float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t vfmsq_f16(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16)))
+float16x8_t vfmsq(float16x8_t, float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t vfmsq_f32(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32)))
+float32x4_t vfmsq(float32x4_t, float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16)))
+float16x8_t vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32)))
+float32x4_t vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
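+// Usage sketch (comment only): the fused multiply ops differ in operand
+// roles: vfmaq(a, b, c) computes a + b*c, vfmaq(a, b, s) with scalar s is
+// a + b*s, vfmasq(a, b, s) is a*b + s, and vfmsq(a, b, c) is a - b*c. A
+// predicated multiply-accumulate over a partial tail vector might look like:
+//   mve_pred16_t p = vctp32q(n);       // n remaining elements, n < 4
+//   acc = vfmaq_m_f32(acc, x, y, p);   // acc += x*y on active lanes only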
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane(float32x4_t, int);
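+// Usage sketch (comment only): vgetq_lane extracts a single lane; the index
+// must be a compile-time constant within range for the element type:
+//   float32_t x0 = vgetq_lane_f32(v, 0);   // lane 0 of a float32x4_t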
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q_f16(const float16_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q_f32(const float32_t *);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q(const float32_t *);
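+// Usage sketch (comment only): vld1q loads one full vector; vld2q/vld4q are
+// de-interleaving structure loads, e.g. splitting interleaved stereo data
+// into separate L/R vectors:
+//   float32x4x2_t lr  = vld2q_f32(stereo);  // lr.val[0] = L, lr.val[1] = R
+//   float32x4_t left  = lr.val[0];
+// The vld1q_z form zeroes the lanes whose predicate bit is clear.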
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);
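+// Usage sketch (comment only): the vldr*q_gather_offset forms take byte
+// offsets, while the _shifted_offset forms scale the offsets by the element
+// size, so plain element indices can be used directly:
+//   uint32x4_t idx = vld1q_u32(indices);
+//   float32x4_t g  = vldrwq_gather_shifted_offset_f32(table, idx);
+// The _gather_base forms gather from absolute addresses held in a vector
+// register, and the _wb variants write the updated addresses back.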
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16)))
+float16x8_t vmaxnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32)))
+float32x4_t vmaxnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16)))
+float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32)))
+float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t vmaxnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16)))
+float16_t vmaxnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t vmaxnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32)))
+float32_t vmaxnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16)))
+float16_t vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32)))
+float32_t vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16)))
+float16x8_t vmaxnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32)))
+float32x4_t vmaxnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16)))
+float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32)))
+float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16)))
+float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32)))
+float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t vmaxnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16)))
+float16_t vmaxnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t vmaxnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32)))
+float32_t vmaxnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16)))
+float16_t vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32)))
+float32_t vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t vminnmaq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16)))
+float16x8_t vminnmaq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t vminnmaq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32)))
+float32x4_t vminnmaq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16)))
+float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32)))
+float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t vminnmavq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16)))
+float16_t vminnmavq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t vminnmavq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32)))
+float32_t vminnmavq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16)))
+float16_t vminnmavq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32)))
+float32_t vminnmavq_p(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t vminnmq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16)))
+float16x8_t vminnmq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t vminnmq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32)))
+float32x4_t vminnmq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16)))
+float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32)))
+float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16)))
+float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32)))
+float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t vminnmvq_f16(float16_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16)))
+float16_t vminnmvq(float16_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t vminnmvq_f32(float32_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32)))
+float32_t vminnmvq(float32_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16)))
+float16_t vminnmvq_p(float16_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32)))
+float32_t vminnmvq_p(float32_t, float32x4_t, mve_pred16_t);
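+// Usage sketch (comment only): the NM forms follow IEEE minNum/maxNum, so a
+// NaN in one operand yields the other (numeric) operand rather than NaN. The
+// v-suffixed forms reduce across lanes into a scalar, folding in the scalar
+// first operand, and the a-suffixed forms compare absolute values:
+//   float32_t m = vmaxnmvq_f32(-INFINITY, v);  // horizontal max of v
+// (INFINITY from <math.h>; shown for illustration only.)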
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t vmulq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16)))
+float16x8_t vmulq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t vmulq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32)))
+float32x4_t vmulq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16)))
+float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32)))
+float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16)))
+float16x8_t vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32)))
+float32x4_t vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t vmulq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16)))
+float16x8_t vmulq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t vmulq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32)))
+float32x4_t vmulq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16)))
+float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32)))
+float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16)))
+float16x8_t vmulq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32)))
+float32x4_t vmulq_x(float32x4_t, float32_t, mve_pred16_t);
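+// Usage sketch (comment only): vmulq multiplies lane-wise; the _n forms take
+// a scalar second operand, so scaling a vector is simply:
+//   float32x4_t scaled = vmulq_n_f32(v, 0.5f);
+//   float32x4_t tail   = vmulq_x_f32(a, b, vctp32q(n)); // inactive lanes undefined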
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t vnegq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16)))
+float16x8_t vnegq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t vnegq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32)))
+float32x4_t vnegq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16)))
+float16x8_t vnegq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32)))
+float32x4_t vnegq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t vnegq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16)))
+float16x8_t vnegq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t vnegq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32)))
+float32x4_t vnegq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t vornq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16)))
+float16x8_t vornq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t vornq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32)))
+float32x4_t vornq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16)))
+float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32)))
+float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16)))
+float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32)))
+float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t vorrq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16)))
+float16x8_t vorrq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t vorrq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32)))
+float32x4_t vorrq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16)))
+float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32)))
+float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16)))
+float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32)))
+float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t);
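+// Usage sketch (comment only): veorq/vorrq/vornq operate on the raw bit
+// patterns of the float lanes (vornq ORs with the complement of the second
+// operand). Clearing the sign bit this way gives a branch-free fabs:
+//   uint32x4_t mask = vdupq_n_u32(0x7fffffffu);
+//   float32x4_t av  = vreinterpretq_f32(vandq(vreinterpretq_u32(v), mask));
+// vandq and vdupq_n_u32 are declared elsewhere in this header.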
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16)))
+float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32)))
+float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t);
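+// Usage sketch (comment only): vpselq is a per-lane select driven by the
+// predicate, picking the first operand where the lane's predicate bits are
+// set and the second where they are clear:
+//   mve_pred16_t p = vcmpgtq(a, b);     // lanes where a > b
+//   float32x4_t mx = vpselq(a, b, p);   // manual per-lane max
+// vcmpgtq is declared elsewhere in this header.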
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8(float32x4_t);
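+// Usage sketch (comment only): vreinterpretq is a pure bit-cast between
+// 128-bit vector types; no conversion instruction is emitted. For example,
+// extracting the raw exponent field of each f32 lane:
+//   uint32x4_t bits = vreinterpretq_u32_f32(v);
+//   uint32x4_t expo = vandq(vshrq(bits, 23), vdupq_n_u32(0xffu));
+// vshrq, vandq, and vdupq_n_u32 are declared elsewhere in this header.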
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t vrev32q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16)))
+float16x8_t vrev32q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16)))
+float16x8_t vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t vrev32q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16)))
+float16x8_t vrev32q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t vrev64q_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16)))
+float16x8_t vrev64q(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t vrev64q_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32)))
+float32x4_t vrev64q(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16)))
+float16x8_t vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32)))
+float32x4_t vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t vrev64q_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16)))
+float16x8_t vrev64q_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t vrev64q_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32)))
+float32x4_t vrev64q_x(float32x4_t, mve_pred16_t);
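+// Usage sketch (comment only): vrev32q_f16 reverses the f16 lanes within
+// each 32-bit container, i.e. it swaps adjacent pairs, while vrev64q
+// reverses lanes within each 64-bit container:
+//   float16x8_t sw = vrev32q_f16(v);  // {v1,v0, v3,v2, v5,v4, v7,v6}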
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t vrndaq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16)))
+float16x8_t vrndaq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t vrndaq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32)))
+float32x4_t vrndaq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16)))
+float16x8_t vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32)))
+float32x4_t vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t vrndaq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16)))
+float16x8_t vrndaq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t vrndaq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32)))
+float32x4_t vrndaq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t vrndmq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16)))
+float16x8_t vrndmq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t vrndmq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32)))
+float32x4_t vrndmq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16)))
+float16x8_t vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32)))
+float32x4_t vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t vrndmq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16)))
+float16x8_t vrndmq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t vrndmq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32)))
+float32x4_t vrndmq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t vrndnq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16)))
+float16x8_t vrndnq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t vrndnq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32)))
+float32x4_t vrndnq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16)))
+float16x8_t vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32)))
+float32x4_t vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t vrndnq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16)))
+float16x8_t vrndnq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t vrndnq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32)))
+float32x4_t vrndnq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t vrndpq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16)))
+float16x8_t vrndpq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t vrndpq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32)))
+float32x4_t vrndpq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16)))
+float16x8_t vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32)))
+float32x4_t vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t vrndpq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16)))
+float16x8_t vrndpq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t vrndpq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32)))
+float32x4_t vrndpq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t vrndq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16)))
+float16x8_t vrndq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t vrndq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32)))
+float32x4_t vrndq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16)))
+float16x8_t vrndq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32)))
+float32x4_t vrndq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t vrndq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16)))
+float16x8_t vrndq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t vrndq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32)))
+float32x4_t vrndq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t vrndxq_f16(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16)))
+float16x8_t vrndxq(float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t vrndxq_f32(float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32)))
+float32x4_t vrndxq(float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16)))
+float16x8_t vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32)))
+float32x4_t vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t vrndxq_x_f16(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16)))
+float16x8_t vrndxq_x(float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t vrndxq_x_f32(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32)))
+float32x4_t vrndxq_x(float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16)))
+float16x8_t vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32)))
+float32x4_t vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t vsubq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16)))
+float16x8_t vsubq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t vsubq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32)))
+float32x4_t vsubq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16)))
+float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32)))
+float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16)))
+float16x8_t vsubq_x(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32)))
+float32x4_t vsubq_x(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t vuninitializedq_f32();
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t vuninitializedq(float16x8_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __ARM_MVE_H */
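
The block above pairs every fixed-suffix MVE intrinsic (vsubq_f16, vrndaq_f32, vstrwq_p_f32, ...) with a polymorphic alias (vsubq, vrndaq, vstrwq_p, ...) through the __clang_arm_builtin_alias attribute, so either spelling resolves to the same __builtin_arm_mve_* builtin; the _m, _x, and _p suffixes select the merging, don't-care, and predicated-store forms respectively. A minimal usage sketch, assuming an MVE-FP target such as -mcpu=cortex-m55 or -march=armv8.1-m.main+mve.fp; the function name predicated_sub is hypothetical and not part of the header:

#include <arm_mve.h>

/* Hypothetical example: subtract b from a per lane, but only in lanes where
 * the predicate p is set; inactive lanes keep the value of the unpredicated
 * difference computed first. */
float16x8_t predicated_sub(float16x8_t a, float16x8_t b, mve_pred16_t p)
{
    float16x8_t d_named = vsubq_f16(a, b); /* explicit-suffix form          */
    float16x8_t d_poly  = vsubq(a, b);     /* polymorphic form, same builtin */
    /* _m (merging) form: vsubq_m(inactive, x, y, p) computes x - y in lanes
     * where p is true and takes `inactive` elsewhere.                      */
    return vsubq_m(d_named, d_poly, b, p);
}

Both d_named and d_poly lower to the identical __builtin_arm_mve_vsubq_f16 call; the overloadable alias exists only so generic code can omit the type suffix.
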
diff --git a/darwin-x86/lib64/clang/11.0.1/include/arm_neon.h b/linux-x86/lib64/clang/11.0.5/include/arm_neon.h
similarity index 80%
copy from darwin-x86/lib64/clang/11.0.1/include/arm_neon.h
copy to linux-x86/lib64/clang/11.0.5/include/arm_neon.h
index a8ab92f..da1e17c 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/arm_neon.h
+++ b/linux-x86/lib64/clang/11.0.5/include/arm_neon.h
@@ -24,12 +24,21 @@
 #ifndef __ARM_NEON_H
 #define __ARM_NEON_H
 
+#ifndef __ARM_FP
+#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
+#else
+
 #if !defined(__ARM_NEON)
 #error "NEON support not enabled"
-#endif
+#else
 
 #include <stdint.h>
 
+#ifdef __ARM_FEATURE_BF16
+#include <arm_bf16.h>
+typedef __bf16 bfloat16_t;
+#endif
+
 typedef float float32_t;
 typedef __fp16 float16_t;
 #ifdef __aarch64__
@@ -44,6 +53,7 @@
 #else
 typedef int8_t poly8_t;
 typedef int16_t poly16_t;
+typedef int64_t poly64_t;
 #endif
 typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
 typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
@@ -73,10 +83,8 @@
 typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
 typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
 typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-#ifdef __aarch64__
 typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
 typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-#endif
 
 typedef struct int8x8x2_t {
   int8x8_t val[2];
@@ -184,7 +192,6 @@
   poly16x8_t val[2];
 } poly16x8x2_t;
 
-#ifdef __aarch64__
 typedef struct poly64x1x2_t {
   poly64x1_t val[2];
 } poly64x1x2_t;
@@ -193,7 +200,6 @@
   poly64x2_t val[2];
 } poly64x2x2_t;
 
-#endif
 typedef struct int8x8x3_t {
   int8x8_t val[3];
 } int8x8x3_t;
@@ -300,7 +306,6 @@
   poly16x8_t val[3];
 } poly16x8x3_t;
 
-#ifdef __aarch64__
 typedef struct poly64x1x3_t {
   poly64x1_t val[3];
 } poly64x1x3_t;
@@ -309,7 +314,6 @@
   poly64x2_t val[3];
 } poly64x2x3_t;
 
-#endif
 typedef struct int8x8x4_t {
   int8x8_t val[4];
 } int8x8x4_t;
@@ -416,7 +420,6 @@
   poly16x8_t val[4];
 } poly16x8x4_t;
 
-#ifdef __aarch64__
 typedef struct poly64x1x4_t {
   poly64x1_t val[4];
 } poly64x1x4_t;
@@ -425,11 +428,1303 @@
   poly64x2_t val[4];
 } poly64x2x4_t;
 
+#ifdef __ARM_FEATURE_BF16
+typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t;
+typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t;
+
+typedef struct bfloat16x4x2_t {
+  bfloat16x4_t val[2];
+} bfloat16x4x2_t;
+
+typedef struct bfloat16x8x2_t {
+  bfloat16x8_t val[2];
+} bfloat16x8x2_t;
+
+typedef struct bfloat16x4x3_t {
+  bfloat16x4_t val[3];
+} bfloat16x4x3_t;
+
+typedef struct bfloat16x8x3_t {
+  bfloat16x8_t val[3];
+} bfloat16x8x3_t;
+
+typedef struct bfloat16x4x4_t {
+  bfloat16x4_t val[4];
+} bfloat16x4x4_t;
+
+typedef struct bfloat16x8x4_t {
+  bfloat16x8_t val[4];
+} bfloat16x8x4_t;
+
 #endif
 
 #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
 
 #ifdef __LITTLE_ENDIAN__
+#define splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#else
+#define splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#endif
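/* Annotation (not part of the generated header): every splat macro below
 * repeats the pattern shown above for splat_lane_p8. Under __LITTLE_ENDIAN__
 * the operand is forwarded straight to the __builtin_neon_splat(q)_lane(q)_v
 * builtin; the big-endian branch first reverses the vector's lanes with
 * __builtin_shufflevector, invokes the builtin (which assumes little-endian
 * lane numbering), and reverses the result back. The __noswap_ variants omit
 * that compensation so other big-endian macro bodies can call them on
 * operands that are already lane-reversed. Single-lane (x1) inputs need no
 * input reversal, so those macros either swap only the widened result or
 * have one unconditional definition. */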
+
+#define splat_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#else
+#define splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#else
+#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
+  poly8x8_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret; \
+})
+#else
+#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
+  poly64x1_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#else
+#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
+  poly16x4_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#else
+#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#else
+#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#else
+#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret; \
+})
+#else
+#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#else
+#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#else
+#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#else
+#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#else
+#define splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
+  uint8x8_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#endif
+
+#define splat_lane_u64(__p0, __p1) __extension__ ({ \
+  uint64x1_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#else
+#define splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
+  uint16x4_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#else
+#define splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
+  int8x8_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
+  __ret; \
+})
+#endif
+
+#define splat_lane_f64(__p0, __p1) __extension__ ({ \
+  float64x1_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#else
+#define splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#else
+#define splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
+  float16x4_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#endif
+
+#define splat_lane_s64(__p0, __p1) __extension__ ({ \
+  int64x1_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#else
+#define splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
+  int16x4_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#else
+#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x8_t __ret; \
+  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#else
+#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
+  __ret; \
+})
+#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x1_t __ret; \
+  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#else
+#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x4_t __ret; \
+  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#else
+#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
+  poly8x16_t __s0 = __p0; \
+  poly8x16_t __ret; \
+  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#else
+#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
+  poly64x2_t __s0 = __p0; \
+  poly64x2_t __ret; \
+  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#else
+#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
+  poly16x8_t __s0 = __p0; \
+  poly16x8_t __ret; \
+  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __ret; \
+  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __ret; \
+  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __ret; \
+  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __ret; \
+  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#else
+#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __ret; \
+  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#else
+#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#else
+#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __ret; \
+  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __ret; \
+  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __ret; \
+  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#else
+#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
+  uint8x16_t __s0 = __p0; \
+  uint8x8_t __ret; \
+  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#else
+#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
+  uint64x2_t __s0 = __p0; \
+  uint64x1_t __ret; \
+  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#else
+#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
+  uint16x8_t __s0 = __p0; \
+  uint16x4_t __ret; \
+  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#else
+#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
+  int8x16_t __s0 = __p0; \
+  int8x8_t __ret; \
+  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#else
+#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
+  __ret; \
+})
+#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
+  float64x2_t __s0 = __p0; \
+  float64x1_t __ret; \
+  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#else
+#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#else
+#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
+  float16x8_t __s0 = __p0; \
+  float16x4_t __ret; \
+  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#else
+#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
+  int64x2_t __s0 = __p0; \
+  int64x1_t __ret; \
+  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#else
+#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
+  int16x8_t __s0 = __p0; \
+  int16x4_t __ret; \
+  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
 __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
   uint8x16_t __ret;
   __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
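The hunk above adds the generated splat helper family that the rest of this patch rewrites other intrinsics onto. Each splat(q)_lane(q)_<type> macro forwards to a single __builtin_neon_splat*_v builtin with a NEON type code (48/49/50/51 = uint8/16/32/64, 32/33/34/35 = int8/16/32/64, 40/41/42 = float16/32/64); on big-endian targets the input lanes are reversed first and the result reversed back, and a __noswap_ variant that skips both reversals is emitted for use inside other already-reversed expansions. A minimal sketch of what one helper computes (illustrative only, not part of the patch; broadcast_lane3 is a hypothetical name):

#include <arm_neon.h>

/* Broadcast lane 3 of a 128-bit uint16 vector to all eight lanes.
 * splatq_laneq_u16 is the internal helper added above; user code
 * would normally reach it through vdupq_laneq_u16. */
uint16x8_t broadcast_lane3(uint16x8_t v) {
  return splatq_laneq_u16(v, 3); /* type code 49 = uint16x8_t */
}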
@@ -4464,372 +5759,372 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
+  poly8x8_t __s0_0 = __p0_0; \
+  poly8x8_t __ret_0; \
+  __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
+  __ret_0; \
 })
 #else
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
+  poly8x8_t __s0_1 = __p0_1; \
+  poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_1; \
+  __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
+  __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_1; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
+  poly16x4_t __s0_2 = __p0_2; \
+  poly16x4_t __ret_2; \
+  __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
+  __ret_2; \
 })
 #else
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
+  poly16x4_t __s0_3 = __p0_3; \
+  poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
+  poly16x4_t __ret_3; \
+  __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
+  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
+  __ret_3; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
+  poly8x8_t __s0_4 = __p0_4; \
+  poly8x16_t __ret_4; \
+  __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
+  __ret_4; \
 })
 #else
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
+  poly8x8_t __s0_5 = __p0_5; \
+  poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_5; \
+  __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
+  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_5; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
+  poly16x4_t __s0_6 = __p0_6; \
+  poly16x8_t __ret_6; \
+  __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
+  __ret_6; \
 })
 #else
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
+  poly16x4_t __s0_7 = __p0_7; \
+  poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
+  poly16x8_t __ret_7; \
+  __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
+  __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_7; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
+  uint8x8_t __s0_8 = __p0_8; \
+  uint8x16_t __ret_8; \
+  __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
+  __ret_8; \
 })
 #else
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
+  uint8x8_t __s0_9 = __p0_9; \
+  uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_9; \
+  __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
+  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_9; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
+  uint32x2_t __s0_10 = __p0_10; \
+  uint32x4_t __ret_10; \
+  __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
+  __ret_10; \
 })
 #else
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
+  uint32x2_t __s0_11 = __p0_11; \
+  uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
+  uint32x4_t __ret_11; \
+  __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
+  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
+  __ret_11; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
+  uint64x1_t __s0_12 = __p0_12; \
+  uint64x2_t __ret_12; \
+  __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
+  __ret_12; \
 })
 #else
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
+  uint64x1_t __s0_13 = __p0_13; \
+  uint64x2_t __ret_13; \
+  __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
+  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
+  __ret_13; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
+  uint16x4_t __s0_14 = __p0_14; \
+  uint16x8_t __ret_14; \
+  __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
+  __ret_14; \
 })
 #else
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
+  uint16x4_t __s0_15 = __p0_15; \
+  uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
+  uint16x8_t __ret_15; \
+  __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
+  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_15; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
+  int8x8_t __s0_16 = __p0_16; \
+  int8x16_t __ret_16; \
+  __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
+  __ret_16; \
 })
 #else
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
+  int8x8_t __s0_17 = __p0_17; \
+  int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_17; \
+  __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
+  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_17; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
+  float32x2_t __s0_18 = __p0_18; \
+  float32x4_t __ret_18; \
+  __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
+  __ret_18; \
 })
 #else
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
+  float32x2_t __s0_19 = __p0_19; \
+  float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
+  float32x4_t __ret_19; \
+  __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
+  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
+  __ret_19; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
+  int32x2_t __s0_20 = __p0_20; \
+  int32x4_t __ret_20; \
+  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
+  __ret_20; \
 })
 #else
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
+  int32x2_t __s0_21 = __p0_21; \
+  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
+  int32x4_t __ret_21; \
+  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
+  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
+  __ret_21; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
+  int64x1_t __s0_22 = __p0_22; \
+  int64x2_t __ret_22; \
+  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
+  __ret_22; \
 })
 #else
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
+  int64x1_t __s0_23 = __p0_23; \
+  int64x2_t __ret_23; \
+  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
+  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
+  __ret_23; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
+  int16x4_t __s0_24 = __p0_24; \
+  int16x8_t __ret_24; \
+  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
+  __ret_24; \
 })
 #else
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
+  int16x4_t __s0_25 = __p0_25; \
+  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
+  int16x8_t __ret_25; \
+  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
+  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_25; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
+  uint8x8_t __s0_26 = __p0_26; \
+  uint8x8_t __ret_26; \
+  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
+  __ret_26; \
 })
 #else
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
+  uint8x8_t __s0_27 = __p0_27; \
+  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_27; \
+  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
+  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_27; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
+  uint32x2_t __s0_28 = __p0_28; \
+  uint32x2_t __ret_28; \
+  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
+  __ret_28; \
 })
 #else
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
+  uint32x2_t __s0_29 = __p0_29; \
+  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
+  uint32x2_t __ret_29; \
+  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
+  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
+  __ret_29; \
 })
 #endif
 
-#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
+  uint64x1_t __s0_30 = __p0_30; \
+  uint64x1_t __ret_30; \
+  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
+  __ret_30; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
+  uint16x4_t __s0_31 = __p0_31; \
+  uint16x4_t __ret_31; \
+  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
+  __ret_31; \
 })
 #else
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
+  uint16x4_t __s0_32 = __p0_32; \
+  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
+  uint16x4_t __ret_32; \
+  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
+  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
+  __ret_32; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
+  int8x8_t __s0_33 = __p0_33; \
+  int8x8_t __ret_33; \
+  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
+  __ret_33; \
 })
 #else
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
+  int8x8_t __s0_34 = __p0_34; \
+  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_34; \
+  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
+  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_34; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
+  float32x2_t __s0_35 = __p0_35; \
+  float32x2_t __ret_35; \
+  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
+  __ret_35; \
 })
 #else
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
+  float32x2_t __s0_36 = __p0_36; \
+  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
+  float32x2_t __ret_36; \
+  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
+  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
+  __ret_36; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
+  int32x2_t __s0_37 = __p0_37; \
+  int32x2_t __ret_37; \
+  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
+  __ret_37; \
 })
 #else
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
+  int32x2_t __s0_38 = __p0_38; \
+  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
+  int32x2_t __ret_38; \
+  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
+  __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
+  __ret_38; \
 })
 #endif
 
-#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
+  int64x1_t __s0_39 = __p0_39; \
+  int64x1_t __ret_39; \
+  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
+  __ret_39; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
+  int16x4_t __s0_40 = __p0_40; \
+  int16x4_t __ret_40; \
+  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
+  __ret_40; \
 })
 #else
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
+  int16x4_t __s0_41 = __p0_41; \
+  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
+  int16x4_t __ret_41; \
+  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
+  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
+  __ret_41; \
 })
 #endif
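This hunk rewrites every vdup_lane/vdupq_lane macro to delegate to the splat helpers instead of open-coding the broadcast with __builtin_shufflevector; the numeric suffixes on the locals (__s0_0, __ret_1, ...) keep nested macro expansions from colliding, and the big-endian paths now call the __noswap_ splat variants between their lane reversals. Caller-visible behavior is unchanged, as in this sketch (illustrative only, not part of the patch; dup_lane2 is a hypothetical name):

#include <arm_neon.h>

int16x4_t dup_lane2(int16x4_t v) {
  /* Same semantics as before the patch: lane 2 of v copied to all
   * four lanes. The macro now expands to splat_lane_s16(v, 2) rather
   * than __builtin_shufflevector(v, v, 2, 2, 2, 2). */
  return vdup_lane_s16(v, 2);
}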
 
@@ -13187,242 +14482,242 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
+  uint32x4_t __s0_42 = __p0_42; \
+  uint32x4_t __s1_42 = __p1_42; \
+  uint32x2_t __s2_42 = __p2_42; \
+  uint32x4_t __ret_42; \
+  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
+  __ret_42; \
 })
 #else
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
+  uint32x4_t __s0_43 = __p0_43; \
+  uint32x4_t __s1_43 = __p1_43; \
+  uint32x2_t __s2_43 = __p2_43; \
+  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
+  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
+  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
+  uint32x4_t __ret_43; \
+  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
+  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
+  __ret_43; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
+  uint16x8_t __s0_44 = __p0_44; \
+  uint16x8_t __s1_44 = __p1_44; \
+  uint16x4_t __s2_44 = __p2_44; \
+  uint16x8_t __ret_44; \
+  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
+  __ret_44; \
 })
 #else
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
+  uint16x8_t __s0_45 = __p0_45; \
+  uint16x8_t __s1_45 = __p1_45; \
+  uint16x4_t __s2_45 = __p2_45; \
+  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
+  uint16x8_t __ret_45; \
+  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
+  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_45; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
+  float32x4_t __s0_46 = __p0_46; \
+  float32x4_t __s1_46 = __p1_46; \
+  float32x2_t __s2_46 = __p2_46; \
+  float32x4_t __ret_46; \
+  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
+  __ret_46; \
 })
 #else
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
+  float32x4_t __s0_47 = __p0_47; \
+  float32x4_t __s1_47 = __p1_47; \
+  float32x2_t __s2_47 = __p2_47; \
+  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
+  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
+  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
+  float32x4_t __ret_47; \
+  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
+  __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
+  __ret_47; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
+  int32x4_t __s0_48 = __p0_48; \
+  int32x4_t __s1_48 = __p1_48; \
+  int32x2_t __s2_48 = __p2_48; \
+  int32x4_t __ret_48; \
+  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
+  __ret_48; \
 })
 #else
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
+  int32x4_t __s0_49 = __p0_49; \
+  int32x4_t __s1_49 = __p1_49; \
+  int32x2_t __s2_49 = __p2_49; \
+  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
+  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
+  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
+  int32x4_t __ret_49; \
+  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
+  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
+  __ret_49; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
+  int16x8_t __s0_50 = __p0_50; \
+  int16x8_t __s1_50 = __p1_50; \
+  int16x4_t __s2_50 = __p2_50; \
+  int16x8_t __ret_50; \
+  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
+  __ret_50; \
 })
 #else
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
+  int16x8_t __s0_51 = __p0_51; \
+  int16x8_t __s1_51 = __p1_51; \
+  int16x4_t __s2_51 = __p2_51; \
+  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
+  int16x8_t __ret_51; \
+  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
+  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_51; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
+  uint32x2_t __s0_52 = __p0_52; \
+  uint32x2_t __s1_52 = __p1_52; \
+  uint32x2_t __s2_52 = __p2_52; \
+  uint32x2_t __ret_52; \
+  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
+  __ret_52; \
 })
 #else
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
+  uint32x2_t __s0_53 = __p0_53; \
+  uint32x2_t __s1_53 = __p1_53; \
+  uint32x2_t __s2_53 = __p2_53; \
+  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
+  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
+  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
+  uint32x2_t __ret_53; \
+  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
+  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
+  __ret_53; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
+  uint16x4_t __s0_54 = __p0_54; \
+  uint16x4_t __s1_54 = __p1_54; \
+  uint16x4_t __s2_54 = __p2_54; \
+  uint16x4_t __ret_54; \
+  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
+  __ret_54; \
 })
 #else
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
+  uint16x4_t __s0_55 = __p0_55; \
+  uint16x4_t __s1_55 = __p1_55; \
+  uint16x4_t __s2_55 = __p2_55; \
+  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
+  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
+  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
+  uint16x4_t __ret_55; \
+  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
+  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
+  __ret_55; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
+  float32x2_t __s0_56 = __p0_56; \
+  float32x2_t __s1_56 = __p1_56; \
+  float32x2_t __s2_56 = __p2_56; \
+  float32x2_t __ret_56; \
+  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
+  __ret_56; \
 })
 #else
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
+  float32x2_t __s0_57 = __p0_57; \
+  float32x2_t __s1_57 = __p1_57; \
+  float32x2_t __s2_57 = __p2_57; \
+  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
+  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
+  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
+  float32x2_t __ret_57; \
+  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
+  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
+  __ret_57; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
+  int32x2_t __s0_58 = __p0_58; \
+  int32x2_t __s1_58 = __p1_58; \
+  int32x2_t __s2_58 = __p2_58; \
+  int32x2_t __ret_58; \
+  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
+  __ret_58; \
 })
 #else
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
+  int32x2_t __s0_59 = __p0_59; \
+  int32x2_t __s1_59 = __p1_59; \
+  int32x2_t __s2_59 = __p2_59; \
+  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
+  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
+  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
+  int32x2_t __ret_59; \
+  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
+  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
+  __ret_59; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
+  int16x4_t __s0_60 = __p0_60; \
+  int16x4_t __s1_60 = __p1_60; \
+  int16x4_t __s2_60 = __p2_60; \
+  int16x4_t __ret_60; \
+  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
+  __ret_60; \
 })
 #else
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
+  int16x4_t __s0_61 = __p0_61; \
+  int16x4_t __s1_61 = __p1_61; \
+  int16x4_t __s2_61 = __p2_61; \
+  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
+  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
+  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
+  int16x4_t __ret_61; \
+  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
+  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
+  __ret_61; \
 })
 #endif
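The multiply-accumulate lane forms follow the same shape: each vmla*_lane_* macro computes __s0 + __s1 * splat(lane of __s2), and the vmls*_lane_* hunk below is identical with subtraction; the big-endian variants reverse all three operands, use the __noswap_ splat, and reverse the result once at the end. Sketch (illustrative only, not part of the patch; fma_lane1 is a hypothetical name):

#include <arm_neon.h>

float32x2_t fma_lane1(float32x2_t acc, float32x2_t x, float32x2_t c) {
  /* acc + x * c[1]; after this patch the broadcast of c[1] goes
   * through splat_lane_f32 instead of a raw shufflevector. */
  return vmla_lane_f32(acc, x, c, 1);
}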
 
@@ -13849,242 +15144,242 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
+  uint32x4_t __s0_62 = __p0_62; \
+  uint32x4_t __s1_62 = __p1_62; \
+  uint32x2_t __s2_62 = __p2_62; \
+  uint32x4_t __ret_62; \
+  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
+  __ret_62; \
 })
 #else
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
+  uint32x4_t __s0_63 = __p0_63; \
+  uint32x4_t __s1_63 = __p1_63; \
+  uint32x2_t __s2_63 = __p2_63; \
+  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
+  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
+  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
+  uint32x4_t __ret_63; \
+  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
+  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
+  __ret_63; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
+  uint16x8_t __s0_64 = __p0_64; \
+  uint16x8_t __s1_64 = __p1_64; \
+  uint16x4_t __s2_64 = __p2_64; \
+  uint16x8_t __ret_64; \
+  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
+  __ret_64; \
 })
 #else
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
+  uint16x8_t __s0_65 = __p0_65; \
+  uint16x8_t __s1_65 = __p1_65; \
+  uint16x4_t __s2_65 = __p2_65; \
+  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
+  uint16x8_t __ret_65; \
+  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
+  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_65; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
+  float32x4_t __s0_66 = __p0_66; \
+  float32x4_t __s1_66 = __p1_66; \
+  float32x2_t __s2_66 = __p2_66; \
+  float32x4_t __ret_66; \
+  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
+  __ret_66; \
 })
 #else
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
+  float32x4_t __s0_67 = __p0_67; \
+  float32x4_t __s1_67 = __p1_67; \
+  float32x2_t __s2_67 = __p2_67; \
+  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
+  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
+  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
+  float32x4_t __ret_67; \
+  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
+  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
+  __ret_67; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
+  int32x4_t __s0_68 = __p0_68; \
+  int32x4_t __s1_68 = __p1_68; \
+  int32x2_t __s2_68 = __p2_68; \
+  int32x4_t __ret_68; \
+  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
+  __ret_68; \
 })
 #else
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
+  int32x4_t __s0_69 = __p0_69; \
+  int32x4_t __s1_69 = __p1_69; \
+  int32x2_t __s2_69 = __p2_69; \
+  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
+  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
+  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
+  int32x4_t __ret_69; \
+  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
+  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
+  __ret_69; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
+  int16x8_t __s0_70 = __p0_70; \
+  int16x8_t __s1_70 = __p1_70; \
+  int16x4_t __s2_70 = __p2_70; \
+  int16x8_t __ret_70; \
+  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
+  __ret_70; \
 })
 #else
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
+  int16x8_t __s0_71 = __p0_71; \
+  int16x8_t __s1_71 = __p1_71; \
+  int16x4_t __s2_71 = __p2_71; \
+  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
+  int16x8_t __ret_71; \
+  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
+  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_71; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
+  uint32x2_t __s0_72 = __p0_72; \
+  uint32x2_t __s1_72 = __p1_72; \
+  uint32x2_t __s2_72 = __p2_72; \
+  uint32x2_t __ret_72; \
+  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
+  __ret_72; \
 })
 #else
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
+  uint32x2_t __s0_73 = __p0_73; \
+  uint32x2_t __s1_73 = __p1_73; \
+  uint32x2_t __s2_73 = __p2_73; \
+  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
+  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
+  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
+  uint32x2_t __ret_73; \
+  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
+  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
+  __ret_73; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
+  uint16x4_t __s0_74 = __p0_74; \
+  uint16x4_t __s1_74 = __p1_74; \
+  uint16x4_t __s2_74 = __p2_74; \
+  uint16x4_t __ret_74; \
+  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
+  __ret_74; \
 })
 #else
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
+  uint16x4_t __s0_75 = __p0_75; \
+  uint16x4_t __s1_75 = __p1_75; \
+  uint16x4_t __s2_75 = __p2_75; \
+  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
+  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
+  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
+  uint16x4_t __ret_75; \
+  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
+  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
+  __ret_75; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
+  float32x2_t __s0_76 = __p0_76; \
+  float32x2_t __s1_76 = __p1_76; \
+  float32x2_t __s2_76 = __p2_76; \
+  float32x2_t __ret_76; \
+  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
+  __ret_76; \
 })
 #else
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
+  float32x2_t __s0_77 = __p0_77; \
+  float32x2_t __s1_77 = __p1_77; \
+  float32x2_t __s2_77 = __p2_77; \
+  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
+  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
+  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
+  float32x2_t __ret_77; \
+  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
+  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
+  __ret_77; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
+  int32x2_t __s0_78 = __p0_78; \
+  int32x2_t __s1_78 = __p1_78; \
+  int32x2_t __s2_78 = __p2_78; \
+  int32x2_t __ret_78; \
+  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
+  __ret_78; \
 })
 #else
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
+  int32x2_t __s0_79 = __p0_79; \
+  int32x2_t __s1_79 = __p1_79; \
+  int32x2_t __s2_79 = __p2_79; \
+  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
+  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
+  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
+  int32x2_t __ret_79; \
+  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
+  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
+  __ret_79; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
+  int16x4_t __s0_80 = __p0_80; \
+  int16x4_t __s1_80 = __p1_80; \
+  int16x4_t __s2_80 = __p2_80; \
+  int16x4_t __ret_80; \
+  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
+  __ret_80; \
 })
 #else
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
+  int16x4_t __s0_81 = __p0_81; \
+  int16x4_t __s1_81 = __p1_81; \
+  int16x4_t __s2_81 = __p2_81; \
+  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
+  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
+  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
+  int16x4_t __ret_81; \
+  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
+  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
+  __ret_81; \
 })
 #endif
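
The `#else` branches handle big-endian targets by reversing each input vector, computing with the `__noswap_` helper variants in reversed lane order, and reversing the result back. This works because elementwise arithmetic commutes with lane reversal; the `__noswap_` splats take the already-reversed vector, so no further swap happens inside them. Below is a portable sketch of that round-trip, assuming a multiplier that has already been splatted to every lane (names are illustrative):

/* Why the #else (big-endian) branches can compute in reversed lane order. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void rev4(int16_t d[4], const int16_t s[4]) {
    for (int i = 0; i < 4; i++)
        d[i] = s[3 - i];   /* mirrors __builtin_shufflevector(v, v, 3, 2, 1, 0) */
}

int main(void) {
    int16_t s0[4] = {10, 20, 30, 40}, s1[4] = {1, -2, 3, -4};
    int16_t k = 7;   /* stands in for the splat: identical in every lane */

    int16_t direct[4];                       /* the __LITTLE_ENDIAN__ branch */
    for (int i = 0; i < 4; i++)
        direct[i] = (int16_t)(s0[i] - s1[i] * k);

    int16_t r0[4], r1[4], tmp[4], swapped[4];
    rev4(r0, s0);                            /* __rev0 */
    rev4(r1, s1);                            /* __rev1 */
    for (int i = 0; i < 4; i++)              /* compute in reversed order */
        tmp[i] = (int16_t)(r0[i] - r1[i] * k);
    rev4(swapped, tmp);                      /* final reversal of __ret */

    for (int i = 0; i < 4; i++)
        assert(direct[i] == swapped[i]);
    puts("big-endian round-trip matches the direct computation");
    return 0;
}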
 
@@ -15127,212 +16422,212 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
+  uint32x4_t __s0_82 = __p0_82; \
+  uint32x2_t __s1_82 = __p1_82; \
+  uint32x4_t __ret_82; \
+  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
+  __ret_82; \
 })
 #else
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
+  uint32x4_t __s0_83 = __p0_83; \
+  uint32x2_t __s1_83 = __p1_83; \
+  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
+  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
+  uint32x4_t __ret_83; \
+  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
+  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
+  __ret_83; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
+  uint16x8_t __s0_84 = __p0_84; \
+  uint16x4_t __s1_84 = __p1_84; \
+  uint16x8_t __ret_84; \
+  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
+  __ret_84; \
 })
 #else
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
+  uint16x8_t __s0_85 = __p0_85; \
+  uint16x4_t __s1_85 = __p1_85; \
+  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
+  uint16x8_t __ret_85; \
+  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
+  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_85; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
+  float32x4_t __s0_86 = __p0_86; \
+  float32x2_t __s1_86 = __p1_86; \
+  float32x4_t __ret_86; \
+  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
+  __ret_86; \
 })
 #else
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
+  float32x4_t __s0_87 = __p0_87; \
+  float32x2_t __s1_87 = __p1_87; \
+  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
+  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
+  float32x4_t __ret_87; \
+  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
+  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
+  __ret_87; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
+  int32x4_t __s0_88 = __p0_88; \
+  int32x2_t __s1_88 = __p1_88; \
+  int32x4_t __ret_88; \
+  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
+  __ret_88; \
 })
 #else
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
+  int32x4_t __s0_89 = __p0_89; \
+  int32x2_t __s1_89 = __p1_89; \
+  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
+  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
+  int32x4_t __ret_89; \
+  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
+  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
+  __ret_89; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
+  int16x8_t __s0_90 = __p0_90; \
+  int16x4_t __s1_90 = __p1_90; \
+  int16x8_t __ret_90; \
+  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
+  __ret_90; \
 })
 #else
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
+  int16x8_t __s0_91 = __p0_91; \
+  int16x4_t __s1_91 = __p1_91; \
+  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
+  int16x8_t __ret_91; \
+  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
+  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_91; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
+  uint32x2_t __s0_92 = __p0_92; \
+  uint32x2_t __s1_92 = __p1_92; \
+  uint32x2_t __ret_92; \
+  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
+  __ret_92; \
 })
 #else
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
+  uint32x2_t __s0_93 = __p0_93; \
+  uint32x2_t __s1_93 = __p1_93; \
+  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
+  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
+  uint32x2_t __ret_93; \
+  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
+  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
+  __ret_93; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
+  uint16x4_t __s0_94 = __p0_94; \
+  uint16x4_t __s1_94 = __p1_94; \
+  uint16x4_t __ret_94; \
+  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
+  __ret_94; \
 })
 #else
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
+  uint16x4_t __s0_95 = __p0_95; \
+  uint16x4_t __s1_95 = __p1_95; \
+  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
+  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
+  uint16x4_t __ret_95; \
+  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
+  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
+  __ret_95; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
+  float32x2_t __s0_96 = __p0_96; \
+  float32x2_t __s1_96 = __p1_96; \
+  float32x2_t __ret_96; \
+  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
+  __ret_96; \
 })
 #else
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
+  float32x2_t __s0_97 = __p0_97; \
+  float32x2_t __s1_97 = __p1_97; \
+  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
+  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
+  float32x2_t __ret_97; \
+  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
+  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
+  __ret_97; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
+  int32x2_t __s0_98 = __p0_98; \
+  int32x2_t __s1_98 = __p1_98; \
+  int32x2_t __ret_98; \
+  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
+  __ret_98; \
 })
 #else
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
+  int32x2_t __s0_99 = __p0_99; \
+  int32x2_t __s1_99 = __p1_99; \
+  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
+  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
+  int32x2_t __ret_99; \
+  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
+  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
+  __ret_99; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
+  int16x4_t __s0_100 = __p0_100; \
+  int16x4_t __s1_100 = __p1_100; \
+  int16x4_t __ret_100; \
+  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
+  __ret_100; \
 })
 #else
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
+  int16x4_t __s0_101 = __p0_101; \
+  int16x4_t __s1_101 = __p1_101; \
+  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
+  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
+  int16x4_t __ret_101; \
+  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
+  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
+  __ret_101; \
 })
 #endif
 
@@ -15651,86 +16946,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
+  uint32x2_t __s0_102 = __p0_102; \
+  uint32x2_t __s1_102 = __p1_102; \
+  uint64x2_t __ret_102; \
+  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
+  __ret_102; \
 })
 #else
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
+  uint32x2_t __s0_103 = __p0_103; \
+  uint32x2_t __s1_103 = __p1_103; \
+  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
+  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
+  uint64x2_t __ret_103; \
+  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
+  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
+  __ret_103; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
+  uint16x4_t __s0_104 = __p0_104; \
+  uint16x4_t __s1_104 = __p1_104; \
+  uint32x4_t __ret_104; \
+  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
+  __ret_104; \
 })
 #else
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
+  uint16x4_t __s0_105 = __p0_105; \
+  uint16x4_t __s1_105 = __p1_105; \
+  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
+  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
+  uint32x4_t __ret_105; \
+  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
+  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
+  __ret_105; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
+  int32x2_t __s0_106 = __p0_106; \
+  int32x2_t __s1_106 = __p1_106; \
+  int64x2_t __ret_106; \
+  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
+  __ret_106; \
 })
 #else
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
+  int32x2_t __s0_107 = __p0_107; \
+  int32x2_t __s1_107 = __p1_107; \
+  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
+  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
+  int64x2_t __ret_107; \
+  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
+  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
+  __ret_107; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
+  int16x4_t __s0_108 = __p0_108; \
+  int16x4_t __s1_108 = __p1_108; \
+  int32x4_t __ret_108; \
+  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
+  __ret_108; \
 })
 #else
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
+  int16x4_t __s0_109 = __p0_109; \
+  int16x4_t __s1_109 = __p1_109; \
+  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
+  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
+  int32x4_t __ret_109; \
+  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
+  __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
+  __ret_109; \
 })
 #endif
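
The `vmull_lane_*` macros differ from the plain `vmul_lane_*` ones above in that the product widens: 32x32-bit lanes produce 64-bit results, so the little-endian branch pairs `vmull_*` with a splat and the big-endian branch pairs `__noswap_vmull_*` with `__noswap_splat_lane_*`. A portable sketch of the semantics (illustrative names, not the header's code):

/* Reference semantics of vmull_lane_s32: widening multiply where the
 * second operand is a single broadcast lane. */
#include <stdint.h>
#include <stdio.h>

static void vmull_lane_s32_ref(int64_t ret[2], const int32_t a[2],
                               const int32_t b[2], int lane) {
    for (int i = 0; i < 2; i++)
        ret[i] = (int64_t)a[i] * (int64_t)b[lane];   /* 32x32 -> 64 bit */
}

int main(void) {
    int32_t a[2] = {100000, -70000}, b[2] = {3, 200000};
    int64_t r[2];
    vmull_lane_s32_ref(r, a, b, 1);
    printf("%lld %lld\n", (long long)r[0], (long long)r[1]);
    /* 20000000000 -14000000000: both exceed 32 bits, hence the widening */
    return 0;
}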
 
@@ -17824,50 +19119,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
+  int64x2_t __s0_110 = __p0_110; \
+  int32x2_t __s1_110 = __p1_110; \
+  int32x2_t __s2_110 = __p2_110; \
+  int64x2_t __ret_110; \
+  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
+  __ret_110; \
 })
 #else
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
+  int64x2_t __s0_111 = __p0_111; \
+  int32x2_t __s1_111 = __p1_111; \
+  int32x2_t __s2_111 = __p2_111; \
+  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
+  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
+  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
+  int64x2_t __ret_111; \
+  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
+  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
+  __ret_111; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
+  int32x4_t __s0_112 = __p0_112; \
+  int16x4_t __s1_112 = __p1_112; \
+  int16x4_t __s2_112 = __p2_112; \
+  int32x4_t __ret_112; \
+  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
+  __ret_112; \
 })
 #else
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
+  int32x4_t __s0_113 = __p0_113; \
+  int16x4_t __s1_113 = __p1_113; \
+  int16x4_t __s2_113 = __p2_113; \
+  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
+  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
+  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
+  int32x4_t __ret_113; \
+  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
+  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
+  __ret_113; \
 })
 #endif
 
@@ -17962,50 +19257,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
+  int64x2_t __s0_114 = __p0_114; \
+  int32x2_t __s1_114 = __p1_114; \
+  int32x2_t __s2_114 = __p2_114; \
+  int64x2_t __ret_114; \
+  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
+  __ret_114; \
 })
 #else
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
+  int64x2_t __s0_115 = __p0_115; \
+  int32x2_t __s1_115 = __p1_115; \
+  int32x2_t __s2_115 = __p2_115; \
+  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
+  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
+  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
+  int64x2_t __ret_115; \
+  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
+  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
+  __ret_115; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
+  int32x4_t __s0_116 = __p0_116; \
+  int16x4_t __s1_116 = __p1_116; \
+  int16x4_t __s2_116 = __p2_116; \
+  int32x4_t __ret_116; \
+  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
+  __ret_116; \
 })
 #else
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
+  int32x4_t __s0_117 = __p0_117; \
+  int16x4_t __s1_117 = __p1_117; \
+  int16x4_t __s2_117 = __p2_117; \
+  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
+  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
+  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
+  int32x4_t __ret_117; \
+  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
+  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
+  __ret_117; \
 })
 #endif
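
`vqdmlal_lane_*` and `vqdmlsl_lane_*` follow the same splat rewrite, but the underlying operation is a saturating doubling multiply followed by a saturating accumulate (add or subtract). A portable sketch of the `s16` accumulate case with explicit saturation, using illustrative names:

/* Reference semantics of vqdmlal_lane_s16: saturating doubling
 * multiply-accumulate with the multiplier taken from one lane. */
#include <stdint.h>
#include <stdio.h>

static int32_t sat32(int64_t v) {
    if (v > INT32_MAX) return INT32_MAX;
    if (v < INT32_MIN) return INT32_MIN;
    return (int32_t)v;
}

static void vqdmlal_lane_s16_ref(int32_t ret[4], const int32_t acc[4],
                                 const int16_t a[4], const int16_t v[4],
                                 int lane) {
    for (int i = 0; i < 4; i++) {
        int32_t prod = sat32(2 * (int64_t)a[i] * v[lane]); /* doubling multiply */
        ret[i] = sat32((int64_t)acc[i] + prod);            /* saturating add */
    }
}

int main(void) {
    int32_t acc[4] = {0, 0, INT32_MAX - 10, 0};
    int16_t a[4] = {1000, -1000, 30000, -32768};
    int16_t v[4] = {4, -32768, 30000, 7};
    int32_t r[4];
    vqdmlal_lane_s16_ref(r, acc, a, v, 2);   /* multiplier = v[2] = 30000 */
    for (int i = 0; i < 4; i++) printf("%d\n", r[i]);  /* r[2] saturates */
    return 0;
}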
 
@@ -18250,44 +19545,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
+  int32x2_t __s0_118 = __p0_118; \
+  int32x2_t __s1_118 = __p1_118; \
+  int64x2_t __ret_118; \
+  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
+  __ret_118; \
 })
 #else
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
+  int32x2_t __s0_119 = __p0_119; \
+  int32x2_t __s1_119 = __p1_119; \
+  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
+  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
+  int64x2_t __ret_119; \
+  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
+  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
+  __ret_119; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
+  int16x4_t __s0_120 = __p0_120; \
+  int16x4_t __s1_120 = __p1_120; \
+  int32x4_t __ret_120; \
+  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
+  __ret_120; \
 })
 #else
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
+  int16x4_t __s0_121 = __p0_121; \
+  int16x4_t __s1_121 = __p1_121; \
+  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
+  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
+  int32x4_t __ret_121; \
+  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
+  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
+  __ret_121; \
 })
 #endif
 
@@ -30796,38 +32091,38 @@
 
 #if !defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
+  float16x4_t __s0_122 = __p0_122; \
+  float16x8_t __ret_122; \
+  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
+  __ret_122; \
 })
 #else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
+  float16x4_t __s0_123 = __p0_123; \
+  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
+  float16x8_t __ret_123; \
+  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
+  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_123; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
+  float16x4_t __s0_124 = __p0_124; \
+  float16x4_t __ret_124; \
+  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
+  __ret_124; \
 })
 #else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
+  float16x4_t __s0_125 = __p0_125; \
+  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
+  float16x4_t __ret_125; \
+  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
+  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
+  __ret_125; \
 })
 #endif
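
For `vdup_lane_f16` the rewrite is the most direct of all: a duplicate-lane is exactly a splat, so the little-endian branch collapses to a single call to the splat helper. Reference semantics in portable C, with `float` standing in for `float16_t` and illustrative names:

/* Reference semantics of vdupq_lane_f16: broadcast one lane of a 4-lane
 * vector into all 8 lanes of a wider vector. */
#include <stdio.h>

static void vdupq_lane_f16_ref(float dst[8], const float src[4], int lane) {
    for (int i = 0; i < 8; i++)
        dst[i] = src[lane];          /* the "q" variant widens 4 lanes to 8 */
}

int main(void) {
    float s[4] = {0.5f, 1.5f, 2.5f, 3.5f}, d[8];
    vdupq_lane_f16_ref(d, s, 3);
    for (int i = 0; i < 8; i++) printf("%.1f ", d[i]);  /* eight 3.5s */
    return 0;
}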
 
@@ -30900,170 +32195,170 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
+  int32x4_t __s0_126 = __p0_126; \
+  int32x2_t __s1_126 = __p1_126; \
+  int32x4_t __ret_126; \
+  __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
+  __ret_126; \
 })
 #else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
+  int32x4_t __s0_127 = __p0_127; \
+  int32x2_t __s1_127 = __p1_127; \
+  int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
+  int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
+  int32x4_t __ret_127; \
+  __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
+  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
+  __ret_127; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
+  int16x8_t __s0_128 = __p0_128; \
+  int16x4_t __s1_128 = __p1_128; \
+  int16x8_t __ret_128; \
+  __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
+  __ret_128; \
 })
 #else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
+  int16x8_t __s0_129 = __p0_129; \
+  int16x4_t __s1_129 = __p1_129; \
+  int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
+  int16x8_t __ret_129; \
+  __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
+  __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_129; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
+  int32x2_t __s0_130 = __p0_130; \
+  int32x2_t __s1_130 = __p1_130; \
+  int32x2_t __ret_130; \
+  __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
+  __ret_130; \
 })
 #else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
+  int32x2_t __s0_131 = __p0_131; \
+  int32x2_t __s1_131 = __p1_131; \
+  int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
+  int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
+  int32x2_t __ret_131; \
+  __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
+  __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
+  __ret_131; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
+  int16x4_t __s0_132 = __p0_132; \
+  int16x4_t __s1_132 = __p1_132; \
+  int16x4_t __ret_132; \
+  __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
+  __ret_132; \
 })
 #else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
+  int16x4_t __s0_133 = __p0_133; \
+  int16x4_t __s1_133 = __p1_133; \
+  int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
+  int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
+  int16x4_t __ret_133; \
+  __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
+  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
+  __ret_133; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
+  int32x4_t __s0_134 = __p0_134; \
+  int32x2_t __s1_134 = __p1_134; \
+  int32x4_t __ret_134; \
+  __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
+  __ret_134; \
 })
 #else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
+  int32x4_t __s0_135 = __p0_135; \
+  int32x2_t __s1_135 = __p1_135; \
+  int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
+  int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
+  int32x4_t __ret_135; \
+  __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
+  __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
+  __ret_135; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
+  int16x8_t __s0_136 = __p0_136; \
+  int16x4_t __s1_136 = __p1_136; \
+  int16x8_t __ret_136; \
+  __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
+  __ret_136; \
 })
 #else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
+  int16x8_t __s0_137 = __p0_137; \
+  int16x4_t __s1_137 = __p1_137; \
+  int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
+  int16x8_t __ret_137; \
+  __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
+  __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_137; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
+  int32x2_t __s0_138 = __p0_138; \
+  int32x2_t __s1_138 = __p1_138; \
+  int32x2_t __ret_138; \
+  __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
+  __ret_138; \
 })
 #else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
+  int32x2_t __s0_139 = __p0_139; \
+  int32x2_t __s1_139 = __p1_139; \
+  int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
+  int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
+  int32x2_t __ret_139; \
+  __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
+  __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
+  __ret_139; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
+  int16x4_t __s0_140 = __p0_140; \
+  int16x4_t __s1_140 = __p1_140; \
+  int16x4_t __ret_140; \
+  __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
+  __ret_140; \
 })
 #else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
+  int16x4_t __s0_141 = __p0_141; \
+  int16x4_t __s1_141 = __p1_141; \
+  int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
+  int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
+  int16x4_t __ret_141; \
+  __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
+  __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
+  __ret_141; \
 })
 #endif
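The hunks above are mechanical: every local inside these statement-expression macros gains a unique numeric suffix (`__p0_127`, `__ret_128`, ...) and the open-coded `__builtin_shufflevector` broadcasts are replaced by calls to the shared `splat(q)_lane_*` helpers. The suffixes are presumably what make the nested expansion safe: these macros are GNU statement expressions, and once one expands inside another, a declaration such as `T __s0 = __s0;` initializes the new inner `__s0` from itself (its scope already starts at the end of the declarator), not from the outer macro's variable. A minimal sketch of that failure mode, using hypothetical macros that reuse one name the way the old definitions did:

/* Hypothetical illustration, not part of arm_neon.h. */
#define INNER(p) __extension__ ({ int __s0 = (p); __s0 + 1; })
#define OUTER(p) __extension__ ({ int __s0 = (p); INNER(__s0); })
/* OUTER(41) makes INNER's declaration expand to
 *   int __s0 = (__s0);
 * whose initializer reads the freshly declared, indeterminate
 * inner __s0. Unique suffixes (__s0_127 vs. __s0_128) keep the
 * outer macro's variable visible inside the nested expansion. */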
 
@@ -36714,6 +38009,2357 @@
   return __ret;
 }
 #endif
+#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
+#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
+__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
+  poly8x8_t __ret;
+  __ret = (poly8x8_t)(__p0);
+  return __ret;
+}
+__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
+  poly64x1_t __ret;
+  __ret = (poly64x1_t)(__p0);
+  return __ret;
+}
+__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
+  poly16x4_t __ret;
+  __ret = (poly16x4_t)(__p0);
+  return __ret;
+}
+__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
+  poly8x16_t __ret;
+  __ret = (poly8x16_t)(__p0);
+  return __ret;
+}
+__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
+  poly128_t __ret;
+  __ret = (poly128_t)(__p0);
+  return __ret;
+}
+__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
+  poly64x2_t __ret;
+  __ret = (poly64x2_t)(__p0);
+  return __ret;
+}
+__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
+  poly16x8_t __ret;
+  __ret = (poly16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
+  uint8x16_t __ret;
+  __ret = (uint8x16_t)(__p0);
+  return __ret;
+}
+__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t)(__p0);
+  return __ret;
+}
+__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
+  uint64x2_t __ret;
+  __ret = (uint64x2_t)(__p0);
+  return __ret;
+}
+__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
+  uint16x8_t __ret;
+  __ret = (uint16x8_t)(__p0);
+  return __ret;
+}
+__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
+  int8x16_t __ret;
+  __ret = (int8x16_t)(__p0);
+  return __ret;
+}
+__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
+  float64x2_t __ret;
+  __ret = (float64x2_t)(__p0);
+  return __ret;
+}
+__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = (float32x4_t)(__p0);
+  return __ret;
+}
+__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
+  float16x8_t __ret;
+  __ret = (float16x8_t)(__p0);
+  return __ret;
+}
+__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t)(__p0);
+  return __ret;
+}
+__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
+  int64x2_t __ret;
+  __ret = (int64x2_t)(__p0);
+  return __ret;
+}
+__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
+  int16x8_t __ret;
+  __ret = (int16x8_t)(__p0);
+  return __ret;
+}
+__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
+  uint8x8_t __ret;
+  __ret = (uint8x8_t)(__p0);
+  return __ret;
+}
+__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
+  uint32x2_t __ret;
+  __ret = (uint32x2_t)(__p0);
+  return __ret;
+}
+__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
+  uint64x1_t __ret;
+  __ret = (uint64x1_t)(__p0);
+  return __ret;
+}
+__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
+  uint16x4_t __ret;
+  __ret = (uint16x4_t)(__p0);
+  return __ret;
+}
+__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
+  int8x8_t __ret;
+  __ret = (int8x8_t)(__p0);
+  return __ret;
+}
+__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
+  float64x1_t __ret;
+  __ret = (float64x1_t)(__p0);
+  return __ret;
+}
+__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t)(__p0);
+  return __ret;
+}
+__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
+  float16x4_t __ret;
+  __ret = (float16x4_t)(__p0);
+  return __ret;
+}
+__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t)(__p0);
+  return __ret;
+}
+__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
+  int64x1_t __ret;
+  __ret = (int64x1_t)(__p0);
+  return __ret;
+}
+__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
+  int16x4_t __ret;
+  __ret = (int16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t)(__p0);
+  return __ret;
+}
+#endif
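Both vreinterpret blocks above are pure bit-pattern casts (each body is a single C-style cast), so they cost nothing at run time; the `__aarch64__` variant only adds the `p128` and `f64` targets that exist on that ISA. A minimal usage sketch, assuming a toolchain with bf16 enabled (e.g. something like `-march=armv8.6-a+bf16`; the exact flag is an assumption, not something this header dictates):

#include <arm_neon.h>

/* Relabel the raw bits of four f32 lanes as eight bf16 lanes
 * and back; no lane values are converted, only reinterpreted. */
static inline float32x4_t roundtrip_bits(float32x4_t v) {
  bfloat16x8_t bits = vreinterpretq_bf16_f32(v);
  return vreinterpretq_f32_bf16(bits);
}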
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#else
+#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#else
+#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
+  __ret; \
+})
+#endif
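The four `splat*_lane*_bf16` groups above are the internal broadcast primitives that the public `vdup*_lane*_bf16` macros further down expand to. Each big-endian variant reverses its input into little-endian lane order, calls the builtin, and reverses the result back, while the `__noswap_` twin skips the reversal for callers that have already reversed. The trailing integer argument (11 for the 64-bit bf16 vector, 43 for the 128-bit one) appears to be clang's internal NEON type code and is best treated as opaque. A sketch of the public entry point, assuming bf16 support:

#include <arm_neon.h>

/* Broadcast lane 2 of a 4-lane bf16 vector to all 8 lanes of a
 * q-register; vdupq_lane_bf16 (defined later in this hunk)
 * forwards to splatq_lane_bf16. */
static inline bfloat16x8_t broadcast_lane2(bfloat16x4_t v) {
  return vdupq_lane_bf16(v, 2);
}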
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#else
+__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
+  float32x4_t __s0_142 = __p0_142; \
+  bfloat16x8_t __s1_142 = __p1_142; \
+  bfloat16x4_t __s2_142 = __p2_142; \
+  float32x4_t __ret_142; \
+  bfloat16x4_t __reint_142 = __s2_142; \
+  float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
+  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
+  __ret_142; \
+})
+#else
+#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
+  float32x4_t __s0_143 = __p0_143; \
+  bfloat16x8_t __s1_143 = __p1_143; \
+  bfloat16x4_t __s2_143 = __p2_143; \
+  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
+  float32x4_t __ret_143; \
+  bfloat16x4_t __reint_143 = __rev2_143; \
+  float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
+  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
+  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
+  __ret_143; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
+  float32x2_t __s0_144 = __p0_144; \
+  bfloat16x4_t __s1_144 = __p1_144; \
+  bfloat16x4_t __s2_144 = __p2_144; \
+  float32x2_t __ret_144; \
+  bfloat16x4_t __reint_144 = __s2_144; \
+  float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
+  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
+  __ret_144; \
+})
+#else
+#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
+  float32x2_t __s0_145 = __p0_145; \
+  bfloat16x4_t __s1_145 = __p1_145; \
+  bfloat16x4_t __s2_145 = __p2_145; \
+  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
+  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
+  float32x2_t __ret_145; \
+  bfloat16x4_t __reint_145 = __rev2_145; \
+  float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
+  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
+  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
+  __ret_145; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
+  float32x4_t __s0_146 = __p0_146; \
+  bfloat16x8_t __s1_146 = __p1_146; \
+  bfloat16x8_t __s2_146 = __p2_146; \
+  float32x4_t __ret_146; \
+  bfloat16x8_t __reint_146 = __s2_146; \
+  float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
+  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
+  __ret_146; \
+})
+#else
+#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
+  float32x4_t __s0_147 = __p0_147; \
+  bfloat16x8_t __s1_147 = __p1_147; \
+  bfloat16x8_t __s2_147 = __p2_147; \
+  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_147; \
+  bfloat16x8_t __reint_147 = __rev2_147; \
+  float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
+  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
+  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
+  __ret_147; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
+  float32x2_t __s0_148 = __p0_148; \
+  bfloat16x4_t __s1_148 = __p1_148; \
+  bfloat16x8_t __s2_148 = __p2_148; \
+  float32x2_t __ret_148; \
+  bfloat16x8_t __reint_148 = __s2_148; \
+  float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
+  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
+  __ret_148; \
+})
+#else
+#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
+  float32x2_t __s0_149 = __p0_149; \
+  bfloat16x4_t __s1_149 = __p1_149; \
+  bfloat16x8_t __s2_149 = __p2_149; \
+  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
+  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_149; \
+  bfloat16x8_t __reint_149 = __rev2_149; \
+  float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
+  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
+  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
+  __ret_149; \
+})
+#endif
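vbfdot(q)_f32 is a 2-way dot product: each f32 result lane is the accumulator lane plus the product-sum of the corresponding pair of bf16 lanes, widened to f32 before multiplying. The `_lane`/`_laneq` macros above reinterpret the bf16 index vector as f32 only so that an adjacent bf16 pair can be broadcast with a single f32 splat, then reinterpret back. A scalar model of the per-lane math, assuming bf16 support:

#include <arm_neon.h>

/* r[i] = acc[i] + a[2i]*b[2i] + a[2i+1]*b[2i+1], for i = 0,1 */
static inline float32x2_t dot_pairs(float32x2_t acc,
                                    bfloat16x4_t a, bfloat16x4_t b) {
  return vbfdot_f32(acc, a, b);
}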
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+  return __ret;
+}
+#else
+__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
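vbfmlalbq_f32 and vbfmlaltq_f32 widen the even-indexed (bottom) and odd-indexed (top) bf16 lanes respectively and fuse a multiply-accumulate into the f32 accumulator, while vbfmmlaq_f32 treats its operands as 2x4 bf16 tiles feeding a 2x2 f32 matrix multiply-accumulate. Using the bottom/top pair together covers every lane, as in this sketch (assuming bf16 support):

#include <arm_neon.h>

/* Accumulate products of all eight bf16 lane pairs into acc:
 * the ...b variant consumes even lanes, the ...t variant odd. */
static inline float32x4_t fma_all_lanes(float32x4_t acc,
                                        bfloat16x8_t a, bfloat16x8_t b) {
  acc = vbfmlalbq_f32(acc, a, b);
  return vbfmlaltq_f32(acc, a, b);
}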
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#define vcreate_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  uint64_t __promote = __p0; \
+  __ret = (bfloat16x4_t)(__promote); \
+  __ret; \
+})
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
+  float32x4_t __ret_150;
+  bfloat16x4_t __reint_150 = __p0_150;
+  int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
+  __ret_150 = *(float32x4_t *) &__reint1_150;
+  return __ret_150;
+}
+#else
+__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
+  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
+  float32x4_t __ret_151;
+  bfloat16x4_t __reint_151 = __rev0_151;
+  int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
+  __ret_151 = *(float32x4_t *) &__reint1_151;
+  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
+  return __ret_151;
+}
+__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
+  float32x4_t __ret_152;
+  bfloat16x4_t __reint_152 = __p0_152;
+  int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
+  __ret_152 = *(float32x4_t *) &__reint1_152;
+  return __ret_152;
+}
+#endif
+
+__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
+  float32_t __ret;
+  bfloat16_t __reint = __p0;
+  int32_t __reint1 = *(int32_t *) &__reint << 16;
+  __ret = *(float32_t *) &__reint1;
+  return __ret;
+}
+__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
+  bfloat16_t __ret;
+  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
+  return __ret;
+}
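The scalar pair above makes the encoding explicit: bf16 is the top 16 bits of an IEEE-754 binary32, so vcvtah_f32_bf16 widens exactly with a 16-bit left shift, while vcvth_bf16_f32 narrows (and rounds) through the builtin. A round-trip sketch, assuming bf16 support:

#include <arm_neon.h>

/* Narrowing discards the low 16 bits of the significand; the
 * subsequent widening is exact, so the round trip returns x
 * rounded to bf16 precision. */
static inline float through_bf16(float x) {
  bfloat16_t h = vcvth_bf16_f32(x);
  return vcvtah_f32_bf16(h);
}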
+#ifdef __LITTLE_ENDIAN__
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
+  bfloat16x4_t __s0_153 = __p0_153; \
+  bfloat16x8_t __ret_153; \
+  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
+  __ret_153; \
+})
+#else
+#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
+  bfloat16x4_t __s0_154 = __p0_154; \
+  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_154; \
+  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
+  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_154; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
+  bfloat16x4_t __s0_155 = __p0_155; \
+  bfloat16x4_t __ret_155; \
+  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
+  __ret_155; \
+})
+#else
+#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
+  bfloat16x4_t __s0_156 = __p0_156; \
+  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_156; \
+  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
+  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
+  __ret_156; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
+  bfloat16x8_t __s0_157 = __p0_157; \
+  bfloat16x8_t __ret_157; \
+  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
+  __ret_157; \
+})
+#else
+#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
+  bfloat16x8_t __s0_158 = __p0_158; \
+  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_158; \
+  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
+  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_158; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
+  bfloat16x8_t __s0_159 = __p0_159; \
+  bfloat16x4_t __ret_159; \
+  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
+  __ret_159; \
+})
+#else
+#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
+  bfloat16x8_t __s0_160 = __p0_160; \
+  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_160; \
+  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
+  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
+  __ret_160; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#else
+#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
+  __ret; \
+})
+#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s0 = __p0; \
+  bfloat16_t __ret; \
+  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+  return __ret;
+}
+#endif
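vget_low_bf16 and vget_high_bf16 split a q-register into its d-halves, and vcombine_bf16 (earlier in this hunk) rejoins them; the big-endian bodies add the shuffles needed to keep lane numbering consistent. A round-trip sketch, assuming bf16 support:

#include <arm_neon.h>

/* lanes 0..3 -> lo, lanes 4..7 -> hi, then rejoin losslessly */
static inline bfloat16x8_t split_and_rejoin(bfloat16x8_t v) {
  bfloat16x4_t lo = vget_low_bf16(v);
  bfloat16x4_t hi = vget_high_bf16(v);
  return vcombine_bf16(lo, hi);
}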
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+  __ret; \
+})
+#else
+#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+  __ret; \
+})
+#else
+#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x2(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x3(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld1q_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld1_bf16_x4(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld2q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld2_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x2_t __ret; \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  bfloat16x4x2_t __ret; \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld3q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld3_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x3_t __ret; \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  bfloat16x4x3_t __ret; \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+  __ret; \
+})
+#else
+#define vld4q_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+  __ret; \
+})
+#else
+#define vld4_dup_bf16(__p0) __extension__ ({ \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+  __ret; \
+})
+#else
+#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8x4_t __ret; \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+  __ret; \
+})
+#else
+#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  bfloat16x4x4_t __ret; \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+ \
+  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __ret; \
+  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
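+/* The __noswap_ forms omit the lane reversals; they are called from other
+ * big-endian implementations whose operands have already been reversed. */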
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#else
+#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16_t __s0 = __p0; \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __ret; \
+  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
+})
+#else
+#define vst1q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
+})
+#else
+#define vst1_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
+})
+#else
+#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8_t __s1 = __p1; \
+  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
+})
+#else
+#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4_t __s1 = __p1; \
+  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
+})
+#else
+#define vst2q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
+})
+#else
+#define vst2_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
+})
+#else
+#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x2_t __s1 = __p1; \
+  bfloat16x8x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
+})
+#else
+#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x2_t __s1 = __p1; \
+  bfloat16x4x2_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
+})
+#else
+#define vst3q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
+})
+#else
+#define vst3_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
+})
+#else
+#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x3_t __s1 = __p1; \
+  bfloat16x8x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
+})
+#else
+#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x3_t __s1 = __p1; \
+  bfloat16x4x3_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
+})
+#else
+#define vst4q_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
+})
+#else
+#define vst4_bf16(__p0, __p1) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
+})
+#else
+#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x8x4_t __s1 = __p1; \
+  bfloat16x8x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
+})
+#else
+#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
+  bfloat16x4x4_t __s1 = __p1; \
+  bfloat16x4x4_t __rev1; \
+  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
+})
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
+  return __ret;
+}
+#endif
+
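+/* __a32_vcvt_bf16_f32 is an internal helper wrapping the AArch32
+ * float32-to-bfloat16 narrowing conversion; the public vcvt intrinsics
+ * below are built on top of it. */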
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = __a32_vcvt_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
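+/* Per the ACLE, vcvtq_low_bf16_f32 places the converted elements in the low
+ * half of the result (high half zeroed) and vcvtq_high_bf16_f32 places them
+ * in the high half (low half copied from the first operand); the vcombine
+ * operand order below follows that. */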
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16(vget_low_bf16(__p0), __a32_vcvt_bf16_f32(__p1));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __noswap_vcombine_bf16(__noswap_vget_low_bf16(__rev0), __noswap___a32_vcvt_bf16_f32(__rev1));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p0), (bfloat16x4_t)(0ULL));
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev0), (bfloat16x4_t)(0ULL));
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
+  bfloat16x8_t __s0_161 = __p0_161; \
+  bfloat16x4_t __s2_161 = __p2_161; \
+  bfloat16x8_t __ret_161; \
+  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
+  __ret_161; \
+})
+#else
+#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
+  bfloat16x8_t __s0_162 = __p0_162; \
+  bfloat16x4_t __s2_162 = __p2_162; \
+  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_162; \
+  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
+  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_162; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
+  bfloat16x4_t __s0_163 = __p0_163; \
+  bfloat16x4_t __s2_163 = __p2_163; \
+  bfloat16x4_t __ret_163; \
+  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
+  __ret_163; \
+})
+#else
+#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
+  bfloat16x4_t __s0_164 = __p0_164; \
+  bfloat16x4_t __s2_164 = __p2_164; \
+  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_164; \
+  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
+  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
+  __ret_164; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
+  bfloat16x8_t __s0_165 = __p0_165; \
+  bfloat16x8_t __s2_165 = __p2_165; \
+  bfloat16x8_t __ret_165; \
+  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
+  __ret_165; \
+})
+#else
+#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
+  bfloat16x8_t __s0_166 = __p0_166; \
+  bfloat16x8_t __s2_166 = __p2_166; \
+  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __ret_166; \
+  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
+  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_166; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
+  bfloat16x4_t __s0_167 = __p0_167; \
+  bfloat16x8_t __s2_167 = __p2_167; \
+  bfloat16x4_t __ret_167; \
+  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
+  __ret_167; \
+})
+#else
+#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
+  bfloat16x4_t __s0_168 = __p0_168; \
+  bfloat16x8_t __s2_168 = __p2_168; \
+  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __ret_168; \
+  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
+  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
+  __ret_168; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  bfloat16x4_t __ret;
+  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
+  return __ret;
+}
+#else
+__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x4_t __ret;
+  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  bfloat16x8_t __ret;
+  __ret = __a64_vcvtq_low_bf16_f32(__p0);
+  return __ret;
+}
+#else
+__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
+  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  bfloat16x8_t __ret;
+  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
+  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
 #if defined(__ARM_FEATURE_COMPLEX)
 #ifdef __LITTLE_ENDIAN__
 __ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
@@ -36984,228 +40630,228 @@
 #endif
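+
+/* These lane-indexed dot-product macros splat the selected lane via the
+ * generated splat*_lane* helpers rather than open-coded shuffles; every
+ * macro-local variable carries a unique numeric suffix so nested expansions
+ * of one generated macro inside another cannot capture each other's names. */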
 
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_lane_u32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
+  uint32x4_t __s0_169 = __p0_169; \
+  uint8x16_t __s1_169 = __p1_169; \
+  uint8x8_t __s2_169 = __p2_169; \
+  uint32x4_t __ret_169; \
+uint8x8_t __reint_169 = __s2_169; \
+uint32x4_t __reint1_169 = splatq_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169); \
+  __ret_169 = vdotq_u32(__s0_169, __s1_169, *(uint8x16_t *) &__reint1_169); \
+  __ret_169; \
 })
 #else
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_lane_u32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
+  uint32x4_t __s0_170 = __p0_170; \
+  uint8x16_t __s1_170 = __p1_170; \
+  uint8x8_t __s2_170 = __p2_170; \
+  uint32x4_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \
+  uint8x16_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_170; \
+uint8x8_t __reint_170 = __rev2_170; \
+uint32x4_t __reint1_170 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_170, __p3_170); \
+  __ret_170 = __noswap_vdotq_u32(__rev0_170, __rev1_170, *(uint8x16_t *) &__reint1_170); \
+  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 3, 2, 1, 0); \
+  __ret_170; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x8_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_lane_s32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
+  int32x4_t __s0_171 = __p0_171; \
+  int8x16_t __s1_171 = __p1_171; \
+  int8x8_t __s2_171 = __p2_171; \
+  int32x4_t __ret_171; \
+int8x8_t __reint_171 = __s2_171; \
+int32x4_t __reint1_171 = splatq_lane_s32(*(int32x2_t *) &__reint_171, __p3_171); \
+  __ret_171 = vdotq_s32(__s0_171, __s1_171, *(int8x16_t *) &__reint1_171); \
+  __ret_171; \
 })
 #else
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_lane_s32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
+  int32x4_t __s0_172 = __p0_172; \
+  int8x16_t __s1_172 = __p1_172; \
+  int8x8_t __s2_172 = __p2_172; \
+  int32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
+  int8x16_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_172; \
+int8x8_t __reint_172 = __rev2_172; \
+int32x4_t __reint1_172 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_172, __p3_172); \
+  __ret_172 = __noswap_vdotq_s32(__rev0_172, __rev1_172, *(int8x16_t *) &__reint1_172); \
+  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
+  __ret_172; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_lane_u32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
+  uint32x2_t __s0_173 = __p0_173; \
+  uint8x8_t __s1_173 = __p1_173; \
+  uint8x8_t __s2_173 = __p2_173; \
+  uint32x2_t __ret_173; \
+uint8x8_t __reint_173 = __s2_173; \
+uint32x2_t __reint1_173 = splat_lane_u32(*(uint32x2_t *) &__reint_173, __p3_173); \
+  __ret_173 = vdot_u32(__s0_173, __s1_173, *(uint8x8_t *) &__reint1_173); \
+  __ret_173; \
 })
 #else
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_lane_u32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
+  uint32x2_t __s0_174 = __p0_174; \
+  uint8x8_t __s1_174 = __p1_174; \
+  uint8x8_t __s2_174 = __p2_174; \
+  uint32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
+  uint8x8_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x2_t __ret_174; \
+uint8x8_t __reint_174 = __rev2_174; \
+uint32x2_t __reint1_174 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174); \
+  __ret_174 = __noswap_vdot_u32(__rev0_174, __rev1_174, *(uint8x8_t *) &__reint1_174); \
+  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
+  __ret_174; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x8_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_lane_s32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
+  int32x2_t __s0_175 = __p0_175; \
+  int8x8_t __s1_175 = __p1_175; \
+  int8x8_t __s2_175 = __p2_175; \
+  int32x2_t __ret_175; \
+int8x8_t __reint_175 = __s2_175; \
+int32x2_t __reint1_175 = splat_lane_s32(*(int32x2_t *) &__reint_175, __p3_175); \
+  __ret_175 = vdot_s32(__s0_175, __s1_175, *(int8x8_t *) &__reint1_175); \
+  __ret_175; \
 })
 #else
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_lane_s32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
+  int32x2_t __s0_176 = __p0_176; \
+  int8x8_t __s1_176 = __p1_176; \
+  int8x8_t __s2_176 = __p2_176; \
+  int32x2_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 1, 0); \
+  int8x8_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_176; \
+int8x8_t __reint_176 = __rev2_176; \
+int32x2_t __reint1_176 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_176, __p3_176); \
+  __ret_176 = __noswap_vdot_s32(__rev0_176, __rev1_176, *(int8x8_t *) &__reint1_176); \
+  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 1, 0); \
+  __ret_176; \
 })
 #endif
 
 #endif
 #if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_laneq_u32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
+  uint32x4_t __s0_177 = __p0_177; \
+  uint8x16_t __s1_177 = __p1_177; \
+  uint8x16_t __s2_177 = __p2_177; \
+  uint32x4_t __ret_177; \
+uint8x16_t __reint_177 = __s2_177; \
+uint32x4_t __reint1_177 = splatq_laneq_u32(*(uint32x4_t *) &__reint_177, __p3_177); \
+  __ret_177 = vdotq_u32(__s0_177, __s1_177, *(uint8x16_t *) &__reint1_177); \
+  __ret_177; \
 })
 #else
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_laneq_u32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
+  uint32x4_t __s0_178 = __p0_178; \
+  uint8x16_t __s1_178 = __p1_178; \
+  uint8x16_t __s2_178 = __p2_178; \
+  uint32x4_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 3, 2, 1, 0); \
+  uint8x16_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_178; \
+uint8x16_t __reint_178 = __rev2_178; \
+uint32x4_t __reint1_178 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_178, __p3_178); \
+  __ret_178 = __noswap_vdotq_u32(__rev0_178, __rev1_178, *(uint8x16_t *) &__reint1_178); \
+  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 3, 2, 1, 0); \
+  __ret_178; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x16_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
+#define vdotq_laneq_s32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
+  int32x4_t __s0_179 = __p0_179; \
+  int8x16_t __s1_179 = __p1_179; \
+  int8x16_t __s2_179 = __p2_179; \
+  int32x4_t __ret_179; \
+int8x16_t __reint_179 = __s2_179; \
+int32x4_t __reint1_179 = splatq_laneq_s32(*(int32x4_t *) &__reint_179, __p3_179); \
+  __ret_179 = vdotq_s32(__s0_179, __s1_179, *(int8x16_t *) &__reint1_179); \
+  __ret_179; \
 })
 #else
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdotq_laneq_s32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
+  int32x4_t __s0_180 = __p0_180; \
+  int8x16_t __s1_180 = __p1_180; \
+  int8x16_t __s2_180 = __p2_180; \
+  int32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
+  int8x16_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_180; \
+int8x16_t __reint_180 = __rev2_180; \
+int32x4_t __reint1_180 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_180, __p3_180); \
+  __ret_180 = __noswap_vdotq_s32(__rev0_180, __rev1_180, *(int8x16_t *) &__reint1_180); \
+  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
+  __ret_180; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_laneq_u32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
+  uint32x2_t __s0_181 = __p0_181; \
+  uint8x8_t __s1_181 = __p1_181; \
+  uint8x16_t __s2_181 = __p2_181; \
+  uint32x2_t __ret_181; \
+uint8x16_t __reint_181 = __s2_181; \
+uint32x2_t __reint1_181 = splat_laneq_u32(*(uint32x4_t *) &__reint_181, __p3_181); \
+  __ret_181 = vdot_u32(__s0_181, __s1_181, *(uint8x8_t *) &__reint1_181); \
+  __ret_181; \
 })
 #else
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_laneq_u32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
+  uint32x2_t __s0_182 = __p0_182; \
+  uint8x8_t __s1_182 = __p1_182; \
+  uint8x16_t __s2_182 = __p2_182; \
+  uint32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
+  uint8x8_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x2_t __ret_182; \
+uint8x16_t __reint_182 = __rev2_182; \
+uint32x2_t __reint1_182 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_182, __p3_182); \
+  __ret_182 = __noswap_vdot_u32(__rev0_182, __rev1_182, *(uint8x8_t *) &__reint1_182); \
+  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
+  __ret_182; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x16_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
+#define vdot_laneq_s32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
+  int32x2_t __s0_183 = __p0_183; \
+  int8x8_t __s1_183 = __p1_183; \
+  int8x16_t __s2_183 = __p2_183; \
+  int32x2_t __ret_183; \
+int8x16_t __reint_183 = __s2_183; \
+int32x2_t __reint1_183 = splat_laneq_s32(*(int32x4_t *) &__reint_183, __p3_183); \
+  __ret_183 = vdot_s32(__s0_183, __s1_183, *(int8x8_t *) &__reint1_183); \
+  __ret_183; \
 })
 #else
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdot_laneq_s32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
+  int32x2_t __s0_184 = __p0_184; \
+  int8x8_t __s1_184 = __p1_184; \
+  int8x16_t __s2_184 = __p2_184; \
+  int32x2_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 1, 0); \
+  int8x8_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_184; \
+int8x16_t __reint_184 = __rev2_184; \
+int32x2_t __reint1_184 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_184, __p3_184); \
+  __ret_184 = __noswap_vdot_s32(__rev0_184, __rev1_184, *(int8x8_t *) &__reint1_184); \
+  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 1, 0); \
+  __ret_184; \
 })
 #endif
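The renumbered vdot_laneq_* macros above show the two mechanical changes applied throughout this regenerated header: every macro-local temporary gains a globally unique _NNN suffix so that nested macro expansions cannot capture one another's names, and the open-coded __builtin_shufflevector lane broadcasts are replaced by the new splat_lane*/splatq_lane* helpers (with __noswap_ variants on the big-endian paths). A minimal sketch of what such a splat helper amounts to for a two-lane vector — an illustration under that assumption, not the generated definition:

// Sketch only: broadcast lane __lane of a two-lane vector to both lanes.
// The generated header defines one such helper per element type and width.
typedef unsigned int u32x2_sketch __attribute__((vector_size(8)));
#define splat_lane_u32_sketch(__v, __lane) \
  __builtin_shufflevector((__v), (__v), (__lane), (__lane))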
 
@@ -38872,44 +42518,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_f16(__p0_185, __p1_185, __p2_185) __extension__ ({ \
+  float16x8_t __s0_185 = __p0_185; \
+  float16x4_t __s1_185 = __p1_185; \
+  float16x8_t __ret_185; \
+  __ret_185 = __s0_185 * splatq_lane_f16(__s1_185, __p2_185); \
+  __ret_185; \
 })
 #else
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_lane_f16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
+  float16x8_t __s0_186 = __p0_186; \
+  float16x4_t __s1_186 = __p1_186; \
+  float16x8_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 3, 2, 1, 0); \
+  float16x8_t __ret_186; \
+  __ret_186 = __rev0_186 * __noswap_splatq_lane_f16(__rev1_186, __p2_186); \
+  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_186; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_lane_f16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
+  float16x4_t __s0_187 = __p0_187; \
+  float16x4_t __s1_187 = __p1_187; \
+  float16x4_t __ret_187; \
+  __ret_187 = __s0_187 * splat_lane_f16(__s1_187, __p2_187); \
+  __ret_187; \
 })
 #else
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_lane_f16(__p0_188, __p1_188, __p2_188) __extension__ ({ \
+  float16x4_t __s0_188 = __p0_188; \
+  float16x4_t __s1_188 = __p1_188; \
+  float16x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
+  float16x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
+  float16x4_t __ret_188; \
+  __ret_188 = __rev0_188 * __noswap_splat_lane_f16(__rev1_188, __p2_188); \
+  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
+  __ret_188; \
 })
 #endif
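The vmulq_lane_f16/vmul_lane_f16 pair above multiplies each half-precision lane by a single broadcast lane of the second operand. A hedged usage sketch, assuming a target with __ARM_FEATURE_FP16_VECTOR_ARITHMETIC:

#include <arm_neon.h>

// Multiply all eight lanes of a by lane 1 of b: a[i] * b[1] for every i.
float16x8_t scale_by_lane1(float16x8_t a, float16x4_t b) {
  return vmulq_lane_f16(a, b, 1);
}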
 
@@ -39651,140 +43297,140 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
-  float16_t __s0_0 = __p0_0; \
-  float16_t __s1_0 = __p1_0; \
-  float16x4_t __s2_0 = __p2_0; \
-  float16_t __ret_0; \
-  __ret_0 = vfmah_lane_f16(__s0_0, -__s1_0, __s2_0, __p3_0); \
-  __ret_0; \
+#define vfmsh_lane_f16(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
+  float16_t __s0_189 = __p0_189; \
+  float16_t __s1_189 = __p1_189; \
+  float16x4_t __s2_189 = __p2_189; \
+  float16_t __ret_189; \
+  __ret_189 = vfmah_lane_f16(__s0_189, -__s1_189, __s2_189, __p3_189); \
+  __ret_189; \
 })
 #else
-#define vfmsh_lane_f16(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
-  float16_t __s0_1 = __p0_1; \
-  float16_t __s1_1 = __p1_1; \
-  float16x4_t __s2_1 = __p2_1; \
-  float16x4_t __rev2_1;  __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 3, 2, 1, 0); \
-  float16_t __ret_1; \
-  __ret_1 = __noswap_vfmah_lane_f16(__s0_1, -__s1_1, __rev2_1, __p3_1); \
-  __ret_1; \
+#define vfmsh_lane_f16(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
+  float16_t __s0_190 = __p0_190; \
+  float16_t __s1_190 = __p1_190; \
+  float16x4_t __s2_190 = __p2_190; \
+  float16x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
+  float16_t __ret_190; \
+  __ret_190 = __noswap_vfmah_lane_f16(__s0_190, -__s1_190, __rev2_190, __p3_190); \
+  __ret_190; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
-  float16x8_t __s0_2 = __p0_2; \
-  float16x8_t __s1_2 = __p1_2; \
-  float16x4_t __s2_2 = __p2_2; \
-  float16x8_t __ret_2; \
-  __ret_2 = vfmaq_lane_f16(__s0_2, -__s1_2, __s2_2, __p3_2); \
-  __ret_2; \
+#define vfmsq_lane_f16(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
+  float16x8_t __s0_191 = __p0_191; \
+  float16x8_t __s1_191 = __p1_191; \
+  float16x4_t __s2_191 = __p2_191; \
+  float16x8_t __ret_191; \
+  __ret_191 = vfmaq_lane_f16(__s0_191, -__s1_191, __s2_191, __p3_191); \
+  __ret_191; \
 })
 #else
-#define vfmsq_lane_f16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
-  float16x8_t __s0_3 = __p0_3; \
-  float16x8_t __s1_3 = __p1_3; \
-  float16x4_t __s2_3 = __p2_3; \
-  float16x8_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_3;  __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_3;  __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
-  float16x8_t __ret_3; \
-  __ret_3 = __noswap_vfmaq_lane_f16(__rev0_3, -__rev1_3, __rev2_3, __p3_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_3; \
+#define vfmsq_lane_f16(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
+  float16x8_t __s0_192 = __p0_192; \
+  float16x8_t __s1_192 = __p1_192; \
+  float16x4_t __s2_192 = __p2_192; \
+  float16x8_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
+  float16x8_t __ret_192; \
+  __ret_192 = __noswap_vfmaq_lane_f16(__rev0_192, -__rev1_192, __rev2_192, __p3_192); \
+  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_192; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
-  float16x4_t __s0_4 = __p0_4; \
-  float16x4_t __s1_4 = __p1_4; \
-  float16x4_t __s2_4 = __p2_4; \
-  float16x4_t __ret_4; \
-  __ret_4 = vfma_lane_f16(__s0_4, -__s1_4, __s2_4, __p3_4); \
-  __ret_4; \
+#define vfms_lane_f16(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
+  float16x4_t __s0_193 = __p0_193; \
+  float16x4_t __s1_193 = __p1_193; \
+  float16x4_t __s2_193 = __p2_193; \
+  float16x4_t __ret_193; \
+  __ret_193 = vfma_lane_f16(__s0_193, -__s1_193, __s2_193, __p3_193); \
+  __ret_193; \
 })
 #else
-#define vfms_lane_f16(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
-  float16x4_t __s0_5 = __p0_5; \
-  float16x4_t __s1_5 = __p1_5; \
-  float16x4_t __s2_5 = __p2_5; \
-  float16x4_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 3, 2, 1, 0); \
-  float16x4_t __rev1_5;  __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
-  float16x4_t __rev2_5;  __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 3, 2, 1, 0); \
-  float16x4_t __ret_5; \
-  __ret_5 = __noswap_vfma_lane_f16(__rev0_5, -__rev1_5, __rev2_5, __p3_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 3, 2, 1, 0); \
-  __ret_5; \
+#define vfms_lane_f16(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
+  float16x4_t __s0_194 = __p0_194; \
+  float16x4_t __s1_194 = __p1_194; \
+  float16x4_t __s2_194 = __p2_194; \
+  float16x4_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 3, 2, 1, 0); \
+  float16x4_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 3, 2, 1, 0); \
+  float16x4_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 3, 2, 1, 0); \
+  float16x4_t __ret_194; \
+  __ret_194 = __noswap_vfma_lane_f16(__rev0_194, -__rev1_194, __rev2_194, __p3_194); \
+  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 3, 2, 1, 0); \
+  __ret_194; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
-  float16_t __s0_6 = __p0_6; \
-  float16_t __s1_6 = __p1_6; \
-  float16x8_t __s2_6 = __p2_6; \
-  float16_t __ret_6; \
-  __ret_6 = vfmah_laneq_f16(__s0_6, -__s1_6, __s2_6, __p3_6); \
-  __ret_6; \
+#define vfmsh_laneq_f16(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
+  float16_t __s0_195 = __p0_195; \
+  float16_t __s1_195 = __p1_195; \
+  float16x8_t __s2_195 = __p2_195; \
+  float16_t __ret_195; \
+  __ret_195 = vfmah_laneq_f16(__s0_195, -__s1_195, __s2_195, __p3_195); \
+  __ret_195; \
 })
 #else
-#define vfmsh_laneq_f16(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
-  float16_t __s0_7 = __p0_7; \
-  float16_t __s1_7 = __p1_7; \
-  float16x8_t __s2_7 = __p2_7; \
-  float16x8_t __rev2_7;  __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_7; \
-  __ret_7 = __noswap_vfmah_laneq_f16(__s0_7, -__s1_7, __rev2_7, __p3_7); \
-  __ret_7; \
+#define vfmsh_laneq_f16(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
+  float16_t __s0_196 = __p0_196; \
+  float16_t __s1_196 = __p1_196; \
+  float16x8_t __s2_196 = __p2_196; \
+  float16x8_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_196; \
+  __ret_196 = __noswap_vfmah_laneq_f16(__s0_196, -__s1_196, __rev2_196, __p3_196); \
+  __ret_196; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
-  float16x8_t __s0_8 = __p0_8; \
-  float16x8_t __s1_8 = __p1_8; \
-  float16x8_t __s2_8 = __p2_8; \
-  float16x8_t __ret_8; \
-  __ret_8 = vfmaq_laneq_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
-  __ret_8; \
+#define vfmsq_laneq_f16(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
+  float16x8_t __s0_197 = __p0_197; \
+  float16x8_t __s1_197 = __p1_197; \
+  float16x8_t __s2_197 = __p2_197; \
+  float16x8_t __ret_197; \
+  __ret_197 = vfmaq_laneq_f16(__s0_197, -__s1_197, __s2_197, __p3_197); \
+  __ret_197; \
 })
 #else
-#define vfmsq_laneq_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
-  float16x8_t __s0_9 = __p0_9; \
-  float16x8_t __s1_9 = __p1_9; \
-  float16x8_t __s2_9 = __p2_9; \
-  float16x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_9;  __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_9;  __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_9; \
-  __ret_9 = __noswap_vfmaq_laneq_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
+#define vfmsq_laneq_f16(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
+  float16x8_t __s0_198 = __p0_198; \
+  float16x8_t __s1_198 = __p1_198; \
+  float16x8_t __s2_198 = __p2_198; \
+  float16x8_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_198; \
+  __ret_198 = __noswap_vfmaq_laneq_f16(__rev0_198, -__rev1_198, __rev2_198, __p3_198); \
+  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_198; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
-  float16x4_t __s0_10 = __p0_10; \
-  float16x4_t __s1_10 = __p1_10; \
-  float16x8_t __s2_10 = __p2_10; \
-  float16x4_t __ret_10; \
-  __ret_10 = vfma_laneq_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
-  __ret_10; \
+#define vfms_laneq_f16(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
+  float16x4_t __s0_199 = __p0_199; \
+  float16x4_t __s1_199 = __p1_199; \
+  float16x8_t __s2_199 = __p2_199; \
+  float16x4_t __ret_199; \
+  __ret_199 = vfma_laneq_f16(__s0_199, -__s1_199, __s2_199, __p3_199); \
+  __ret_199; \
 })
 #else
-#define vfms_laneq_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
-  float16x4_t __s0_11 = __p0_11; \
-  float16x4_t __s1_11 = __p1_11; \
-  float16x8_t __s2_11 = __p2_11; \
-  float16x4_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
-  float16x4_t __rev1_11;  __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
-  float16x8_t __rev2_11;  __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_11; \
-  __ret_11 = __noswap_vfma_laneq_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
+#define vfms_laneq_f16(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
+  float16x4_t __s0_200 = __p0_200; \
+  float16x4_t __s1_200 = __p1_200; \
+  float16x8_t __s2_200 = __p2_200; \
+  float16x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
+  float16x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
+  float16x8_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_200; \
+  __ret_200 = __noswap_vfma_laneq_f16(__rev0_200, -__rev1_200, __rev2_200, __p3_200); \
+  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
+  __ret_200; \
 })
 #endif
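As the expansions above show, the vfms*_lane*_f16 family is implemented by negating the second operand and reusing the corresponding vfma form, so each lane computes s0 - s1 * s2[lane] in a single fused operation. A scalar reference (a sketch, not the header's code):

// Per-lane behaviour of the fused multiply-subtract wrappers above:
// ret = s0 + (-s1) * s2_lane, with no intermediate rounding step.
float fms_lane_ref(float s0, float s1, float s2_lane) {
  return __builtin_fmaf(-s1, s2_lane, s0);
}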
 
@@ -39971,44 +43617,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_f16(__p0_201, __p1_201, __p2_201) __extension__ ({ \
+  float16x8_t __s0_201 = __p0_201; \
+  float16x8_t __s1_201 = __p1_201; \
+  float16x8_t __ret_201; \
+  __ret_201 = __s0_201 * splatq_laneq_f16(__s1_201, __p2_201); \
+  __ret_201; \
 })
 #else
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_f16(__p0_202, __p1_202, __p2_202) __extension__ ({ \
+  float16x8_t __s0_202 = __p0_202; \
+  float16x8_t __s1_202 = __p1_202; \
+  float16x8_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_202; \
+  __ret_202 = __rev0_202 * __noswap_splatq_laneq_f16(__rev1_202, __p2_202); \
+  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_202; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_f16(__p0_203, __p1_203, __p2_203) __extension__ ({ \
+  float16x4_t __s0_203 = __p0_203; \
+  float16x8_t __s1_203 = __p1_203; \
+  float16x4_t __ret_203; \
+  __ret_203 = __s0_203 * splat_laneq_f16(__s1_203, __p2_203); \
+  __ret_203; \
 })
 #else
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_laneq_f16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
+  float16x4_t __s0_204 = __p0_204; \
+  float16x8_t __s1_204 = __p1_204; \
+  float16x4_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \
+  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_204; \
+  __ret_204 = __rev0_204 * __noswap_splat_laneq_f16(__rev1_204, __p2_204); \
+  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \
+  __ret_204; \
 })
 #endif
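The _laneq forms above differ from the _lane forms only in the width of the lane source: the second operand is a 128-bit float16x8_t rather than a 64-bit float16x4_t, so the lane index ranges over 0..7 instead of 0..3. A hedged usage sketch:

#include <arm_neon.h>

// a[i] * b[7]: pick the highest lane of the 128-bit source.
float16x4_t mul_by_high_lane(float16x4_t a, float16x8_t b) {
  return vmul_laneq_f16(a, b, 7);
}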
 
@@ -40076,44 +43722,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_lane_f16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
+  float16x8_t __s0_205 = __p0_205; \
+  float16x4_t __s1_205 = __p1_205; \
+  float16x8_t __ret_205; \
+  __ret_205 = vmulxq_f16(__s0_205, splatq_lane_f16(__s1_205, __p2_205)); \
+  __ret_205; \
 })
 #else
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_lane_f16(__p0_206, __p1_206, __p2_206) __extension__ ({ \
+  float16x8_t __s0_206 = __p0_206; \
+  float16x4_t __s1_206 = __p1_206; \
+  float16x8_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
+  float16x8_t __ret_206; \
+  __ret_206 = __noswap_vmulxq_f16(__rev0_206, __noswap_splatq_lane_f16(__rev1_206, __p2_206)); \
+  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_206; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulx_lane_f16(__p0_207, __p1_207, __p2_207) __extension__ ({ \
+  float16x4_t __s0_207 = __p0_207; \
+  float16x4_t __s1_207 = __p1_207; \
+  float16x4_t __ret_207; \
+  __ret_207 = vmulx_f16(__s0_207, splat_lane_f16(__s1_207, __p2_207)); \
+  __ret_207; \
 })
 #else
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulx_lane_f16(__p0_208, __p1_208, __p2_208) __extension__ ({ \
+  float16x4_t __s0_208 = __p0_208; \
+  float16x4_t __s1_208 = __p1_208; \
+  float16x4_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \
+  float16x4_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \
+  float16x4_t __ret_208; \
+  __ret_208 = __noswap_vmulx_f16(__rev0_208, __noswap_splat_lane_f16(__rev1_208, __p2_208)); \
+  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \
+  __ret_208; \
 })
 #endif
 
@@ -40137,44 +43783,44 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_laneq_f16(__p0_209, __p1_209, __p2_209) __extension__ ({ \
+  float16x8_t __s0_209 = __p0_209; \
+  float16x8_t __s1_209 = __p1_209; \
+  float16x8_t __ret_209; \
+  __ret_209 = vmulxq_f16(__s0_209, splatq_laneq_f16(__s1_209, __p2_209)); \
+  __ret_209; \
 })
 #else
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_laneq_f16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
+  float16x8_t __s0_210 = __p0_210; \
+  float16x8_t __s1_210 = __p1_210; \
+  float16x8_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_210; \
+  __ret_210 = __noswap_vmulxq_f16(__rev0_210, __noswap_splatq_laneq_f16(__rev1_210, __p2_210)); \
+  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_210; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulx_laneq_f16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
+  float16x4_t __s0_211 = __p0_211; \
+  float16x8_t __s1_211 = __p1_211; \
+  float16x4_t __ret_211; \
+  __ret_211 = vmulx_f16(__s0_211, splat_laneq_f16(__s1_211, __p2_211)); \
+  __ret_211; \
 })
 #else
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulx_laneq_f16(__p0_212, __p1_212, __p2_212) __extension__ ({ \
+  float16x4_t __s0_212 = __p0_212; \
+  float16x8_t __s1_212 = __p1_212; \
+  float16x4_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 3, 2, 1, 0); \
+  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_212; \
+  __ret_212 = __noswap_vmulx_f16(__rev0_212, __noswap_splat_laneq_f16(__rev1_212, __p2_212)); \
+  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 3, 2, 1, 0); \
+  __ret_212; \
 })
 #endif
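The vmulx* wrappers above follow the same splat-then-multiply shape but lower to FMULX, which behaves like an ordinary multiply except that (+/-0) x (+/-Inf) returns +/-2.0 rather than NaN — the behaviour reciprocal-step refinement code relies on. A hedged usage sketch:

#include <arm_neon.h>

float16x4_t mulx_by_lane0(float16x4_t a, float16x4_t b) {
  return vmulx_lane_f16(a, b, 0);  // FMULX of a[i] with b[0]
}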
 
@@ -40606,6 +44252,160 @@
 #endif
 
 #endif
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+  return __ret;
+}
+#else
+__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret;
+  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdotq_lane_s32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
+  int32x4_t __s0_213 = __p0_213; \
+  uint8x16_t __s1_213 = __p1_213; \
+  int8x8_t __s2_213 = __p2_213; \
+  int32x4_t __ret_213; \
+int8x8_t __reint_213 = __s2_213; \
+  __ret_213 = vusdotq_s32(__s0_213, __s1_213, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_213, __p3_213))); \
+  __ret_213; \
+})
+#else
+#define vusdotq_lane_s32(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
+  int32x4_t __s0_214 = __p0_214; \
+  uint8x16_t __s1_214 = __p1_214; \
+  int8x8_t __s2_214 = __p2_214; \
+  int32x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
+  uint8x16_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_214; \
+int8x8_t __reint_214 = __rev2_214; \
+  __ret_214 = __noswap_vusdotq_s32(__rev0_214, __rev1_214, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_214, __p3_214))); \
+  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
+  __ret_214; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdot_lane_s32(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
+  int32x2_t __s0_215 = __p0_215; \
+  uint8x8_t __s1_215 = __p1_215; \
+  int8x8_t __s2_215 = __p2_215; \
+  int32x2_t __ret_215; \
+int8x8_t __reint_215 = __s2_215; \
+  __ret_215 = vusdot_s32(__s0_215, __s1_215, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_215, __p3_215))); \
+  __ret_215; \
+})
+#else
+#define vusdot_lane_s32(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
+  int32x2_t __s0_216 = __p0_216; \
+  uint8x8_t __s1_216 = __p1_216; \
+  int8x8_t __s2_216 = __p2_216; \
+  int32x2_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 1, 0); \
+  uint8x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_216; \
+int8x8_t __reint_216 = __rev2_216; \
+  __ret_216 = __noswap_vusdot_s32(__rev0_216, __rev1_216, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_216, __p3_216))); \
+  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 1, 0); \
+  __ret_216; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
+  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#endif
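The newly added __ARM_FEATURE_MATMUL_INT8 block above introduces the i8mm intrinsics: vmmlaq_u32/vmmlaq_s32 (a 2x8 by 8x2 matrix multiply-accumulate of 8-bit elements into a 2x2 32-bit accumulator), the mixed-sign vusdot/vusdotq dot products with their _lane forms, and vusmmlaq_s32. A scalar reference for vusdot_s32's architected behaviour (a sketch of the semantics, not the header's implementation): each 32-bit lane accumulates the dot product of four unsigned bytes with four signed bytes.

#include <stdint.h>

// vusdot_s32 reference: acc[lane] += sum_k u[4*lane+k] * s[4*lane+k]
void usdot_ref(int32_t acc[2], const uint8_t u[8], const int8_t s[8]) {
  for (int lane = 0; lane < 2; ++lane)
    for (int k = 0; k < 4; ++k)
      acc[lane] += (int32_t)u[4 * lane + k] * (int32_t)s[4 * lane + k];
}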
 #if defined(__ARM_FEATURE_QRDMX)
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
@@ -40680,98 +44480,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_lane_s32(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
+  int32x4_t __s0_217 = __p0_217; \
+  int32x4_t __s1_217 = __p1_217; \
+  int32x2_t __s2_217 = __p2_217; \
+  int32x4_t __ret_217; \
+  __ret_217 = vqaddq_s32(__s0_217, vqrdmulhq_s32(__s1_217, splatq_lane_s32(__s2_217, __p3_217))); \
+  __ret_217; \
 })
 #else
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_lane_s32(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
+  int32x4_t __s0_218 = __p0_218; \
+  int32x4_t __s1_218 = __p1_218; \
+  int32x2_t __s2_218 = __p2_218; \
+  int32x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
+  int32x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
+  int32x2_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 1, 0); \
+  int32x4_t __ret_218; \
+  __ret_218 = __noswap_vqaddq_s32(__rev0_218, __noswap_vqrdmulhq_s32(__rev1_218, __noswap_splatq_lane_s32(__rev2_218, __p3_218))); \
+  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
+  __ret_218; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_lane_s16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
+  int16x8_t __s0_219 = __p0_219; \
+  int16x8_t __s1_219 = __p1_219; \
+  int16x4_t __s2_219 = __p2_219; \
+  int16x8_t __ret_219; \
+  __ret_219 = vqaddq_s16(__s0_219, vqrdmulhq_s16(__s1_219, splatq_lane_s16(__s2_219, __p3_219))); \
+  __ret_219; \
 })
 #else
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_lane_s16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
+  int16x8_t __s0_220 = __p0_220; \
+  int16x8_t __s1_220 = __p1_220; \
+  int16x4_t __s2_220 = __p2_220; \
+  int16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
+  int16x8_t __ret_220; \
+  __ret_220 = __noswap_vqaddq_s16(__rev0_220, __noswap_vqrdmulhq_s16(__rev1_220, __noswap_splatq_lane_s16(__rev2_220, __p3_220))); \
+  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_220; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_lane_s32(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
+  int32x2_t __s0_221 = __p0_221; \
+  int32x2_t __s1_221 = __p1_221; \
+  int32x2_t __s2_221 = __p2_221; \
+  int32x2_t __ret_221; \
+  __ret_221 = vqadd_s32(__s0_221, vqrdmulh_s32(__s1_221, splat_lane_s32(__s2_221, __p3_221))); \
+  __ret_221; \
 })
 #else
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlah_lane_s32(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
+  int32x2_t __s0_222 = __p0_222; \
+  int32x2_t __s1_222 = __p1_222; \
+  int32x2_t __s2_222 = __p2_222; \
+  int32x2_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 1, 0); \
+  int32x2_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 1, 0); \
+  int32x2_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 1, 0); \
+  int32x2_t __ret_222; \
+  __ret_222 = __noswap_vqadd_s32(__rev0_222, __noswap_vqrdmulh_s32(__rev1_222, __noswap_splat_lane_s32(__rev2_222, __p3_222))); \
+  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 1, 0); \
+  __ret_222; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_lane_s16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
+  int16x4_t __s0_223 = __p0_223; \
+  int16x4_t __s1_223 = __p1_223; \
+  int16x4_t __s2_223 = __p2_223; \
+  int16x4_t __ret_223; \
+  __ret_223 = vqadd_s16(__s0_223, vqrdmulh_s16(__s1_223, splat_lane_s16(__s2_223, __p3_223))); \
+  __ret_223; \
 })
 #else
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlah_lane_s16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
+  int16x4_t __s0_224 = __p0_224; \
+  int16x4_t __s1_224 = __p1_224; \
+  int16x4_t __s2_224 = __p2_224; \
+  int16x4_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 3, 2, 1, 0); \
+  int16x4_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 3, 2, 1, 0); \
+  int16x4_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 3, 2, 1, 0); \
+  int16x4_t __ret_224; \
+  __ret_224 = __noswap_vqadd_s16(__rev0_224, __noswap_vqrdmulh_s16(__rev1_224, __noswap_splat_lane_s16(__rev2_224, __p3_224))); \
+  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 3, 2, 1, 0); \
+  __ret_224; \
 })
 #endif
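The ARMv8.1 vqrdmlah*_lane* macros above are emulated here as a saturating add of a rounding-doubling multiply-high, i.e. vqadd(s0, vqrdmulh(s1, splat(s2, lane))). A scalar reference mirroring that emulation for the s16 case (a sketch, not the header's code):

#include <stdint.h>

static int16_t sat16(int64_t x) {
  if (x > INT16_MAX) return INT16_MAX;
  if (x < INT16_MIN) return INT16_MIN;
  return (int16_t)x;
}

// vqrdmulh_s16: saturating rounding doubling multiply, high half.
static int16_t qrdmulh_ref_s16(int16_t a, int16_t b) {
  return sat16((2 * (int64_t)a * b + (1 << 15)) >> 16);
}

// vqrdmlah_lane_s16, per lane: saturating accumulate of the rounded
// product of a with the broadcast lane of the third operand.
int16_t qrdmlah_ref_s16(int16_t acc, int16_t a, int16_t b_lane) {
  return sat16((int64_t)acc + qrdmulh_ref_s16(a, b_lane));
}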
 
@@ -40848,292 +44648,292 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_lane_s32(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
+  int32x4_t __s0_225 = __p0_225; \
+  int32x4_t __s1_225 = __p1_225; \
+  int32x2_t __s2_225 = __p2_225; \
+  int32x4_t __ret_225; \
+  __ret_225 = vqsubq_s32(__s0_225, vqrdmulhq_s32(__s1_225, splatq_lane_s32(__s2_225, __p3_225))); \
+  __ret_225; \
 })
 #else
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_lane_s32(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
+  int32x4_t __s0_226 = __p0_226; \
+  int32x4_t __s1_226 = __p1_226; \
+  int32x2_t __s2_226 = __p2_226; \
+  int32x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
+  int32x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
+  int32x2_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 1, 0); \
+  int32x4_t __ret_226; \
+  __ret_226 = __noswap_vqsubq_s32(__rev0_226, __noswap_vqrdmulhq_s32(__rev1_226, __noswap_splatq_lane_s32(__rev2_226, __p3_226))); \
+  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
+  __ret_226; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_lane_s16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
+  int16x8_t __s0_227 = __p0_227; \
+  int16x8_t __s1_227 = __p1_227; \
+  int16x4_t __s2_227 = __p2_227; \
+  int16x8_t __ret_227; \
+  __ret_227 = vqsubq_s16(__s0_227, vqrdmulhq_s16(__s1_227, splatq_lane_s16(__s2_227, __p3_227))); \
+  __ret_227; \
 })
 #else
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_lane_s16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
+  int16x8_t __s0_228 = __p0_228; \
+  int16x8_t __s1_228 = __p1_228; \
+  int16x4_t __s2_228 = __p2_228; \
+  int16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
+  int16x8_t __ret_228; \
+  __ret_228 = __noswap_vqsubq_s16(__rev0_228, __noswap_vqrdmulhq_s16(__rev1_228, __noswap_splatq_lane_s16(__rev2_228, __p3_228))); \
+  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_228; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_lane_s32(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
+  int32x2_t __s0_229 = __p0_229; \
+  int32x2_t __s1_229 = __p1_229; \
+  int32x2_t __s2_229 = __p2_229; \
+  int32x2_t __ret_229; \
+  __ret_229 = vqsub_s32(__s0_229, vqrdmulh_s32(__s1_229, splat_lane_s32(__s2_229, __p3_229))); \
+  __ret_229; \
 })
 #else
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlsh_lane_s32(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
+  int32x2_t __s0_230 = __p0_230; \
+  int32x2_t __s1_230 = __p1_230; \
+  int32x2_t __s2_230 = __p2_230; \
+  int32x2_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 1, 0); \
+  int32x2_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 1, 0); \
+  int32x2_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 1, 0); \
+  int32x2_t __ret_230; \
+  __ret_230 = __noswap_vqsub_s32(__rev0_230, __noswap_vqrdmulh_s32(__rev1_230, __noswap_splat_lane_s32(__rev2_230, __p3_230))); \
+  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 1, 0); \
+  __ret_230; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_lane_s16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
+  int16x4_t __s0_231 = __p0_231; \
+  int16x4_t __s1_231 = __p1_231; \
+  int16x4_t __s2_231 = __p2_231; \
+  int16x4_t __ret_231; \
+  __ret_231 = vqsub_s16(__s0_231, vqrdmulh_s16(__s1_231, splat_lane_s16(__s2_231, __p3_231))); \
+  __ret_231; \
 })
 #else
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlsh_lane_s16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
+  int16x4_t __s0_232 = __p0_232; \
+  int16x4_t __s1_232 = __p1_232; \
+  int16x4_t __s2_232 = __p2_232; \
+  int16x4_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 3, 2, 1, 0); \
+  int16x4_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 3, 2, 1, 0); \
+  int16x4_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 3, 2, 1, 0); \
+  int16x4_t __ret_232; \
+  __ret_232 = __noswap_vqsub_s16(__rev0_232, __noswap_vqrdmulh_s16(__rev1_232, __noswap_splat_lane_s16(__rev2_232, __p3_232))); \
+  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 3, 2, 1, 0); \
+  __ret_232; \
 })
 #endif
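The vqrdmlsh*_lane* family above is the subtracting counterpart: the same rounded doubling multiply-high, accumulated with vqsub instead of vqadd. A sketch reusing the helpers from the previous reference:

int16_t qrdmlsh_ref_s16(int16_t acc, int16_t a, int16_t b_lane) {
  return sat16((int64_t)acc - qrdmulh_ref_s16(a, b_lane));
}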
 
 #endif
 #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_laneq_s32(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
+  int32x4_t __s0_233 = __p0_233; \
+  int32x4_t __s1_233 = __p1_233; \
+  int32x4_t __s2_233 = __p2_233; \
+  int32x4_t __ret_233; \
+  __ret_233 = vqaddq_s32(__s0_233, vqrdmulhq_s32(__s1_233, splatq_laneq_s32(__s2_233, __p3_233))); \
+  __ret_233; \
 })
 #else
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_laneq_s32(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
+  int32x4_t __s0_234 = __p0_234; \
+  int32x4_t __s1_234 = __p1_234; \
+  int32x4_t __s2_234 = __p2_234; \
+  int32x4_t __rev0_234;  __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \
+  int32x4_t __rev1_234;  __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \
+  int32x4_t __rev2_234;  __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \
+  int32x4_t __ret_234; \
+  __ret_234 = __noswap_vqaddq_s32(__rev0_234, __noswap_vqrdmulhq_s32(__rev1_234, __noswap_splatq_laneq_s32(__rev2_234, __p3_234))); \
+  __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \
+  __ret_234; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlahq_laneq_s16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
+  int16x8_t __s0_235 = __p0_235; \
+  int16x8_t __s1_235 = __p1_235; \
+  int16x8_t __s2_235 = __p2_235; \
+  int16x8_t __ret_235; \
+  __ret_235 = vqaddq_s16(__s0_235, vqrdmulhq_s16(__s1_235, splatq_laneq_s16(__s2_235, __p3_235))); \
+  __ret_235; \
 })
 #else
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlahq_laneq_s16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
+  int16x8_t __s0_236 = __p0_236; \
+  int16x8_t __s1_236 = __p1_236; \
+  int16x8_t __s2_236 = __p2_236; \
+  int16x8_t __rev0_236;  __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_236;  __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_236;  __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_236; \
+  __ret_236 = __noswap_vqaddq_s16(__rev0_236, __noswap_vqrdmulhq_s16(__rev1_236, __noswap_splatq_laneq_s16(__rev2_236, __p3_236))); \
+  __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_236; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_laneq_s32(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
+  int32x2_t __s0_237 = __p0_237; \
+  int32x2_t __s1_237 = __p1_237; \
+  int32x4_t __s2_237 = __p2_237; \
+  int32x2_t __ret_237; \
+  __ret_237 = vqadd_s32(__s0_237, vqrdmulh_s32(__s1_237, splat_laneq_s32(__s2_237, __p3_237))); \
+  __ret_237; \
 })
 #else
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlah_laneq_s32(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
+  int32x2_t __s0_238 = __p0_238; \
+  int32x2_t __s1_238 = __p1_238; \
+  int32x4_t __s2_238 = __p2_238; \
+  int32x2_t __rev0_238;  __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 1, 0); \
+  int32x2_t __rev1_238;  __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 1, 0); \
+  int32x4_t __rev2_238;  __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 3, 2, 1, 0); \
+  int32x2_t __ret_238; \
+  __ret_238 = __noswap_vqadd_s32(__rev0_238, __noswap_vqrdmulh_s32(__rev1_238, __noswap_splat_laneq_s32(__rev2_238, __p3_238))); \
+  __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 1, 0); \
+  __ret_238; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlah_laneq_s16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
+  int16x4_t __s0_239 = __p0_239; \
+  int16x4_t __s1_239 = __p1_239; \
+  int16x8_t __s2_239 = __p2_239; \
+  int16x4_t __ret_239; \
+  __ret_239 = vqadd_s16(__s0_239, vqrdmulh_s16(__s1_239, splat_laneq_s16(__s2_239, __p3_239))); \
+  __ret_239; \
 })
 #else
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlah_laneq_s16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
+  int16x4_t __s0_240 = __p0_240; \
+  int16x4_t __s1_240 = __p1_240; \
+  int16x8_t __s2_240 = __p2_240; \
+  int16x4_t __rev0_240;  __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 3, 2, 1, 0); \
+  int16x4_t __rev1_240;  __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 3, 2, 1, 0); \
+  int16x8_t __rev2_240;  __rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_240; \
+  __ret_240 = __noswap_vqadd_s16(__rev0_240, __noswap_vqrdmulh_s16(__rev1_240, __noswap_splat_laneq_s16(__rev2_240, __p3_240))); \
+  __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 3, 2, 1, 0); \
+  __ret_240; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_laneq_s32(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
+  int32x4_t __s0_241 = __p0_241; \
+  int32x4_t __s1_241 = __p1_241; \
+  int32x4_t __s2_241 = __p2_241; \
+  int32x4_t __ret_241; \
+  __ret_241 = vqsubq_s32(__s0_241, vqrdmulhq_s32(__s1_241, splatq_laneq_s32(__s2_241, __p3_241))); \
+  __ret_241; \
 })
 #else
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_laneq_s32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
+  int32x4_t __s0_242 = __p0_242; \
+  int32x4_t __s1_242 = __p1_242; \
+  int32x4_t __s2_242 = __p2_242; \
+  int32x4_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \
+  int32x4_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 3, 2, 1, 0); \
+  int32x4_t __rev2_242;  __rev2_242 = __builtin_shufflevector(__s2_242, __s2_242, 3, 2, 1, 0); \
+  int32x4_t __ret_242; \
+  __ret_242 = __noswap_vqsubq_s32(__rev0_242, __noswap_vqrdmulhq_s32(__rev1_242, __noswap_splatq_laneq_s32(__rev2_242, __p3_242))); \
+  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \
+  __ret_242; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlshq_laneq_s16(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
+  int16x8_t __s0_243 = __p0_243; \
+  int16x8_t __s1_243 = __p1_243; \
+  int16x8_t __s2_243 = __p2_243; \
+  int16x8_t __ret_243; \
+  __ret_243 = vqsubq_s16(__s0_243, vqrdmulhq_s16(__s1_243, splatq_laneq_s16(__s2_243, __p3_243))); \
+  __ret_243; \
 })
 #else
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlshq_laneq_s16(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
+  int16x8_t __s0_244 = __p0_244; \
+  int16x8_t __s1_244 = __p1_244; \
+  int16x8_t __s2_244 = __p2_244; \
+  int16x8_t __rev0_244;  __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_244;  __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_244; \
+  __ret_244 = __noswap_vqsubq_s16(__rev0_244, __noswap_vqrdmulhq_s16(__rev1_244, __noswap_splatq_laneq_s16(__rev2_244, __p3_244))); \
+  __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_244; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
+  int32x2_t __s0_245 = __p0_245; \
+  int32x2_t __s1_245 = __p1_245; \
+  int32x4_t __s2_245 = __p2_245; \
+  int32x2_t __ret_245; \
+  __ret_245 = vqsub_s32(__s0_245, vqrdmulh_s32(__s1_245, splat_laneq_s32(__s2_245, __p3_245))); \
+  __ret_245; \
 })
 #else
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqrdmlsh_laneq_s32(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
+  int32x2_t __s0_246 = __p0_246; \
+  int32x2_t __s1_246 = __p1_246; \
+  int32x4_t __s2_246 = __p2_246; \
+  int32x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
+  int32x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
+  int32x4_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 3, 2, 1, 0); \
+  int32x2_t __ret_246; \
+  __ret_246 = __noswap_vqsub_s32(__rev0_246, __noswap_vqrdmulh_s32(__rev1_246, __noswap_splat_laneq_s32(__rev2_246, __p3_246))); \
+  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
+  __ret_246; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
+#define vqrdmlsh_laneq_s16(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
+  int16x4_t __s0_247 = __p0_247; \
+  int16x4_t __s1_247 = __p1_247; \
+  int16x8_t __s2_247 = __p2_247; \
+  int16x4_t __ret_247; \
+  __ret_247 = vqsub_s16(__s0_247, vqrdmulh_s16(__s1_247, splat_laneq_s16(__s2_247, __p3_247))); \
+  __ret_247; \
 })
 #else
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqrdmlsh_laneq_s16(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
+  int16x4_t __s0_248 = __p0_248; \
+  int16x4_t __s1_248 = __p1_248; \
+  int16x8_t __s2_248 = __p2_248; \
+  int16x4_t __rev0_248;  __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \
+  int16x4_t __rev1_248;  __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \
+  int16x8_t __rev2_248;  __rev2_248 = __builtin_shufflevector(__s2_248, __s2_248, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_248; \
+  __ret_248 = __noswap_vqsub_s16(__rev0_248, __noswap_vqrdmulh_s16(__rev1_248, __noswap_splat_laneq_s16(__rev2_248, __p3_248))); \
+  __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \
+  __ret_248; \
 })
 #endif
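
The `vqrdmlsh_*` family above is the same construction with a saturating subtract: the macro bodies swap `vqadd`/`__noswap_vqadd` for `vqsub`/`__noswap_vqsub` around an identical `vqrdmulh` product. In the scalar model from the earlier sketch, only the final sign flips:

#include <stdint.h>

/* acc - rounding-doubling-multiply-high, saturated; mirrors sqrdmlah_s32. */
static int32_t sqrdmlsh_s32(int32_t acc, int32_t b, int32_t c) {
  int64_t rdh = (((int64_t)b * c) + ((int64_t)1 << 30)) >> 31;
  int64_t dif = (int64_t)acc - rdh;
  return dif > INT32_MAX ? INT32_MAX : dif < INT32_MIN ? INT32_MIN : (int32_t)dif;
}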
 
@@ -43582,892 +47382,892 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
-  poly8x16_t __s0_12 = __p0_12; \
-  poly8x8_t __s2_12 = __p2_12; \
-  poly8x16_t __ret_12; \
-  __ret_12 = vsetq_lane_p8(vget_lane_p8(__s2_12, __p3_12), __s0_12, __p1_12); \
-  __ret_12; \
+#define vcopyq_lane_p8(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
+  poly8x16_t __s0_249 = __p0_249; \
+  poly8x8_t __s2_249 = __p2_249; \
+  poly8x16_t __ret_249; \
+  __ret_249 = vsetq_lane_p8(vget_lane_p8(__s2_249, __p3_249), __s0_249, __p1_249); \
+  __ret_249; \
 })
 #else
-#define vcopyq_lane_p8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
-  poly8x16_t __s0_13 = __p0_13; \
-  poly8x8_t __s2_13 = __p2_13; \
-  poly8x16_t __rev0_13;  __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_13;  __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_13; \
-  __ret_13 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_13; \
+#define vcopyq_lane_p8(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
+  poly8x16_t __s0_250 = __p0_250; \
+  poly8x8_t __s2_250 = __p2_250; \
+  poly8x16_t __rev0_250;  __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_250;  __rev2_250 = __builtin_shufflevector(__s2_250, __s2_250, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_250; \
+  __ret_250 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_250, __p3_250), __rev0_250, __p1_250); \
+  __ret_250 = __builtin_shufflevector(__ret_250, __ret_250, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_250; \
 })
 #endif
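
The `vcopy*_lane_*` macros that follow are pure compositions: read one lane of the source with `vget(q)_lane_*`, write it into the destination with `vset(q)_lane_*`; the big-endian branch reverses lane order first and undoes it afterwards. A minimal usage sketch, assuming an AArch64 target with this header available:

#include <arm_neon.h>

/* Copy lane 3 of the 64-bit source into lane 0 of the 128-bit destination;
 * both lane indices must be integer constant expressions. */
uint8x16_t copy_one_lane(uint8x16_t dst, uint8x8_t src) {
  return vcopyq_lane_u8(dst, 0, src, 3);
}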
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
-  poly16x8_t __s0_14 = __p0_14; \
-  poly16x4_t __s2_14 = __p2_14; \
-  poly16x8_t __ret_14; \
-  __ret_14 = vsetq_lane_p16(vget_lane_p16(__s2_14, __p3_14), __s0_14, __p1_14); \
-  __ret_14; \
+#define vcopyq_lane_p16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
+  poly16x8_t __s0_251 = __p0_251; \
+  poly16x4_t __s2_251 = __p2_251; \
+  poly16x8_t __ret_251; \
+  __ret_251 = vsetq_lane_p16(vget_lane_p16(__s2_251, __p3_251), __s0_251, __p1_251); \
+  __ret_251; \
 })
 #else
-#define vcopyq_lane_p16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
-  poly16x8_t __s0_15 = __p0_15; \
-  poly16x4_t __s2_15 = __p2_15; \
-  poly16x8_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_15;  __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 3, 2, 1, 0); \
-  poly16x8_t __ret_15; \
-  __ret_15 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_15, __p3_15), __rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
+#define vcopyq_lane_p16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
+  poly16x8_t __s0_252 = __p0_252; \
+  poly16x4_t __s2_252 = __p2_252; \
+  poly16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __rev2_252;  __rev2_252 = __builtin_shufflevector(__s2_252, __s2_252, 3, 2, 1, 0); \
+  poly16x8_t __ret_252; \
+  __ret_252 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_252, __p3_252), __rev0_252, __p1_252); \
+  __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_252; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
-  uint8x16_t __s0_16 = __p0_16; \
-  uint8x8_t __s2_16 = __p2_16; \
-  uint8x16_t __ret_16; \
-  __ret_16 = vsetq_lane_u8(vget_lane_u8(__s2_16, __p3_16), __s0_16, __p1_16); \
-  __ret_16; \
+#define vcopyq_lane_u8(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
+  uint8x16_t __s0_253 = __p0_253; \
+  uint8x8_t __s2_253 = __p2_253; \
+  uint8x16_t __ret_253; \
+  __ret_253 = vsetq_lane_u8(vget_lane_u8(__s2_253, __p3_253), __s0_253, __p1_253); \
+  __ret_253; \
 })
 #else
-#define vcopyq_lane_u8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
-  uint8x16_t __s0_17 = __p0_17; \
-  uint8x8_t __s2_17 = __p2_17; \
-  uint8x16_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_17;  __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_17; \
-  __ret_17 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
+#define vcopyq_lane_u8(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
+  uint8x16_t __s0_254 = __p0_254; \
+  uint8x8_t __s2_254 = __p2_254; \
+  uint8x16_t __rev0_254;  __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_254;  __rev2_254 = __builtin_shufflevector(__s2_254, __s2_254, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_254; \
+  __ret_254 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_254, __p3_254), __rev0_254, __p1_254); \
+  __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_254; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
-  uint32x4_t __s0_18 = __p0_18; \
-  uint32x2_t __s2_18 = __p2_18; \
-  uint32x4_t __ret_18; \
-  __ret_18 = vsetq_lane_u32(vget_lane_u32(__s2_18, __p3_18), __s0_18, __p1_18); \
-  __ret_18; \
+#define vcopyq_lane_u32(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
+  uint32x4_t __s0_255 = __p0_255; \
+  uint32x2_t __s2_255 = __p2_255; \
+  uint32x4_t __ret_255; \
+  __ret_255 = vsetq_lane_u32(vget_lane_u32(__s2_255, __p3_255), __s0_255, __p1_255); \
+  __ret_255; \
 })
 #else
-#define vcopyq_lane_u32(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
-  uint32x4_t __s0_19 = __p0_19; \
-  uint32x2_t __s2_19 = __p2_19; \
-  uint32x4_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 3, 2, 1, 0); \
-  uint32x2_t __rev2_19;  __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 1, 0); \
-  uint32x4_t __ret_19; \
-  __ret_19 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_19, __p3_19), __rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
+#define vcopyq_lane_u32(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
+  uint32x4_t __s0_256 = __p0_256; \
+  uint32x2_t __s2_256 = __p2_256; \
+  uint32x4_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \
+  uint32x2_t __rev2_256;  __rev2_256 = __builtin_shufflevector(__s2_256, __s2_256, 1, 0); \
+  uint32x4_t __ret_256; \
+  __ret_256 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_256, __p3_256), __rev0_256, __p1_256); \
+  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \
+  __ret_256; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
-  uint64x2_t __s0_20 = __p0_20; \
-  uint64x1_t __s2_20 = __p2_20; \
-  uint64x2_t __ret_20; \
-  __ret_20 = vsetq_lane_u64(vget_lane_u64(__s2_20, __p3_20), __s0_20, __p1_20); \
-  __ret_20; \
+#define vcopyq_lane_u64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
+  uint64x2_t __s0_257 = __p0_257; \
+  uint64x1_t __s2_257 = __p2_257; \
+  uint64x2_t __ret_257; \
+  __ret_257 = vsetq_lane_u64(vget_lane_u64(__s2_257, __p3_257), __s0_257, __p1_257); \
+  __ret_257; \
 })
 #else
-#define vcopyq_lane_u64(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
-  uint64x2_t __s0_21 = __p0_21; \
-  uint64x1_t __s2_21 = __p2_21; \
-  uint64x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  uint64x2_t __ret_21; \
-  __ret_21 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_21, __p3_21), __rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \
-  __ret_21; \
+#define vcopyq_lane_u64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
+  uint64x2_t __s0_258 = __p0_258; \
+  uint64x1_t __s2_258 = __p2_258; \
+  uint64x2_t __rev0_258;  __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 1, 0); \
+  uint64x2_t __ret_258; \
+  __ret_258 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_258, __p3_258), __rev0_258, __p1_258); \
+  __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 1, 0); \
+  __ret_258; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
-  uint16x8_t __s0_22 = __p0_22; \
-  uint16x4_t __s2_22 = __p2_22; \
-  uint16x8_t __ret_22; \
-  __ret_22 = vsetq_lane_u16(vget_lane_u16(__s2_22, __p3_22), __s0_22, __p1_22); \
-  __ret_22; \
+#define vcopyq_lane_u16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
+  uint16x8_t __s0_259 = __p0_259; \
+  uint16x4_t __s2_259 = __p2_259; \
+  uint16x8_t __ret_259; \
+  __ret_259 = vsetq_lane_u16(vget_lane_u16(__s2_259, __p3_259), __s0_259, __p1_259); \
+  __ret_259; \
 })
 #else
-#define vcopyq_lane_u16(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
-  uint16x8_t __s0_23 = __p0_23; \
-  uint16x4_t __s2_23 = __p2_23; \
-  uint16x8_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_23;  __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 3, 2, 1, 0); \
-  uint16x8_t __ret_23; \
-  __ret_23 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_23, __p3_23), __rev0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_23; \
+#define vcopyq_lane_u16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
+  uint16x8_t __s0_260 = __p0_260; \
+  uint16x4_t __s2_260 = __p2_260; \
+  uint16x8_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \
+  uint16x8_t __ret_260; \
+  __ret_260 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_260, __p3_260), __rev0_260, __p1_260); \
+  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_260; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
-  int8x16_t __s0_24 = __p0_24; \
-  int8x8_t __s2_24 = __p2_24; \
-  int8x16_t __ret_24; \
-  __ret_24 = vsetq_lane_s8(vget_lane_s8(__s2_24, __p3_24), __s0_24, __p1_24); \
-  __ret_24; \
+#define vcopyq_lane_s8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
+  int8x16_t __s0_261 = __p0_261; \
+  int8x8_t __s2_261 = __p2_261; \
+  int8x16_t __ret_261; \
+  __ret_261 = vsetq_lane_s8(vget_lane_s8(__s2_261, __p3_261), __s0_261, __p1_261); \
+  __ret_261; \
 })
 #else
-#define vcopyq_lane_s8(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
-  int8x16_t __s0_25 = __p0_25; \
-  int8x8_t __s2_25 = __p2_25; \
-  int8x16_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_25;  __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_25; \
-  __ret_25 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_25, __p3_25), __rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
+#define vcopyq_lane_s8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
+  int8x16_t __s0_262 = __p0_262; \
+  int8x8_t __s2_262 = __p2_262; \
+  int8x16_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_262; \
+  __ret_262 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_262, __p3_262), __rev0_262, __p1_262); \
+  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_262; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
-  float32x4_t __s0_26 = __p0_26; \
-  float32x2_t __s2_26 = __p2_26; \
-  float32x4_t __ret_26; \
-  __ret_26 = vsetq_lane_f32(vget_lane_f32(__s2_26, __p3_26), __s0_26, __p1_26); \
-  __ret_26; \
+#define vcopyq_lane_f32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
+  float32x4_t __s0_263 = __p0_263; \
+  float32x2_t __s2_263 = __p2_263; \
+  float32x4_t __ret_263; \
+  __ret_263 = vsetq_lane_f32(vget_lane_f32(__s2_263, __p3_263), __s0_263, __p1_263); \
+  __ret_263; \
 })
 #else
-#define vcopyq_lane_f32(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
-  float32x4_t __s0_27 = __p0_27; \
-  float32x2_t __s2_27 = __p2_27; \
-  float32x4_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
-  float32x2_t __rev2_27;  __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 1, 0); \
-  float32x4_t __ret_27; \
-  __ret_27 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_27, __p3_27), __rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \
-  __ret_27; \
+#define vcopyq_lane_f32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
+  float32x4_t __s0_264 = __p0_264; \
+  float32x2_t __s2_264 = __p2_264; \
+  float32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
+  float32x2_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \
+  float32x4_t __ret_264; \
+  __ret_264 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_264, __p3_264), __rev0_264, __p1_264); \
+  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
+  __ret_264; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
-  int32x4_t __s0_28 = __p0_28; \
-  int32x2_t __s2_28 = __p2_28; \
-  int32x4_t __ret_28; \
-  __ret_28 = vsetq_lane_s32(vget_lane_s32(__s2_28, __p3_28), __s0_28, __p1_28); \
-  __ret_28; \
+#define vcopyq_lane_s32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
+  int32x4_t __s0_265 = __p0_265; \
+  int32x2_t __s2_265 = __p2_265; \
+  int32x4_t __ret_265; \
+  __ret_265 = vsetq_lane_s32(vget_lane_s32(__s2_265, __p3_265), __s0_265, __p1_265); \
+  __ret_265; \
 })
 #else
-#define vcopyq_lane_s32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
-  int32x4_t __s0_29 = __p0_29; \
-  int32x2_t __s2_29 = __p2_29; \
-  int32x4_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \
-  int32x2_t __rev2_29;  __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
-  int32x4_t __ret_29; \
-  __ret_29 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 3, 2, 1, 0); \
-  __ret_29; \
+#define vcopyq_lane_s32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
+  int32x4_t __s0_266 = __p0_266; \
+  int32x2_t __s2_266 = __p2_266; \
+  int32x4_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 3, 2, 1, 0); \
+  int32x2_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 1, 0); \
+  int32x4_t __ret_266; \
+  __ret_266 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_266, __p3_266), __rev0_266, __p1_266); \
+  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 3, 2, 1, 0); \
+  __ret_266; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
-  int64x2_t __s0_30 = __p0_30; \
-  int64x1_t __s2_30 = __p2_30; \
-  int64x2_t __ret_30; \
-  __ret_30 = vsetq_lane_s64(vget_lane_s64(__s2_30, __p3_30), __s0_30, __p1_30); \
-  __ret_30; \
+#define vcopyq_lane_s64(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
+  int64x2_t __s0_267 = __p0_267; \
+  int64x1_t __s2_267 = __p2_267; \
+  int64x2_t __ret_267; \
+  __ret_267 = vsetq_lane_s64(vget_lane_s64(__s2_267, __p3_267), __s0_267, __p1_267); \
+  __ret_267; \
 })
 #else
-#define vcopyq_lane_s64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
-  int64x2_t __s0_31 = __p0_31; \
-  int64x1_t __s2_31 = __p2_31; \
-  int64x2_t __rev0_31;  __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
-  int64x2_t __ret_31; \
-  __ret_31 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_31, __p3_31), __rev0_31, __p1_31); \
-  __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
-  __ret_31; \
+#define vcopyq_lane_s64(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
+  int64x2_t __s0_268 = __p0_268; \
+  int64x1_t __s2_268 = __p2_268; \
+  int64x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
+  int64x2_t __ret_268; \
+  __ret_268 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_268, __p3_268), __rev0_268, __p1_268); \
+  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
+  __ret_268; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
-  int16x8_t __s0_32 = __p0_32; \
-  int16x4_t __s2_32 = __p2_32; \
-  int16x8_t __ret_32; \
-  __ret_32 = vsetq_lane_s16(vget_lane_s16(__s2_32, __p3_32), __s0_32, __p1_32); \
-  __ret_32; \
+#define vcopyq_lane_s16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
+  int16x8_t __s0_269 = __p0_269; \
+  int16x4_t __s2_269 = __p2_269; \
+  int16x8_t __ret_269; \
+  __ret_269 = vsetq_lane_s16(vget_lane_s16(__s2_269, __p3_269), __s0_269, __p1_269); \
+  __ret_269; \
 })
 #else
-#define vcopyq_lane_s16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
-  int16x8_t __s0_33 = __p0_33; \
-  int16x4_t __s2_33 = __p2_33; \
-  int16x8_t __rev0_33;  __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_33;  __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
-  int16x8_t __ret_33; \
-  __ret_33 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
-  __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_33; \
+#define vcopyq_lane_s16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
+  int16x8_t __s0_270 = __p0_270; \
+  int16x4_t __s2_270 = __p2_270; \
+  int16x8_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 3, 2, 1, 0); \
+  int16x8_t __ret_270; \
+  __ret_270 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_270, __p3_270), __rev0_270, __p1_270); \
+  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_270; \
 })
 #endif
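
A note on the big-endian branches used throughout: on big-endian targets clang keeps NEON vectors in reversed lane order, so each macro shuffles its operands into canonical order, calls the `__noswap_` variant of the underlying intrinsic (which omits the internal re-reversal), and shuffles the result back. A standalone sketch of the reversal idiom, using a generic clang vector type rather than the header's own:

#include <stdint.h>

typedef int32_t v4si __attribute__((vector_size(16)));

/* Flip the four lanes; the descending index list matches the
 * "3, 2, 1, 0" masks in the macros above. */
static v4si reverse_lanes(v4si x) {
  return __builtin_shufflevector(x, x, 3, 2, 1, 0);
}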
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
-  poly8x8_t __s0_34 = __p0_34; \
-  poly8x8_t __s2_34 = __p2_34; \
-  poly8x8_t __ret_34; \
-  __ret_34 = vset_lane_p8(vget_lane_p8(__s2_34, __p3_34), __s0_34, __p1_34); \
-  __ret_34; \
+#define vcopy_lane_p8(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
+  poly8x8_t __s0_271 = __p0_271; \
+  poly8x8_t __s2_271 = __p2_271; \
+  poly8x8_t __ret_271; \
+  __ret_271 = vset_lane_p8(vget_lane_p8(__s2_271, __p3_271), __s0_271, __p1_271); \
+  __ret_271; \
 })
 #else
-#define vcopy_lane_p8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
-  poly8x8_t __s0_35 = __p0_35; \
-  poly8x8_t __s2_35 = __p2_35; \
-  poly8x8_t __rev0_35;  __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_35;  __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_35; \
-  __ret_35 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
-  __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_35; \
+#define vcopy_lane_p8(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
+  poly8x8_t __s0_272 = __p0_272; \
+  poly8x8_t __s2_272 = __p2_272; \
+  poly8x8_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_272; \
+  __ret_272 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_272, __p3_272), __rev0_272, __p1_272); \
+  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_272; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
-  poly16x4_t __s0_36 = __p0_36; \
-  poly16x4_t __s2_36 = __p2_36; \
-  poly16x4_t __ret_36; \
-  __ret_36 = vset_lane_p16(vget_lane_p16(__s2_36, __p3_36), __s0_36, __p1_36); \
-  __ret_36; \
+#define vcopy_lane_p16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
+  poly16x4_t __s0_273 = __p0_273; \
+  poly16x4_t __s2_273 = __p2_273; \
+  poly16x4_t __ret_273; \
+  __ret_273 = vset_lane_p16(vget_lane_p16(__s2_273, __p3_273), __s0_273, __p1_273); \
+  __ret_273; \
 })
 #else
-#define vcopy_lane_p16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
-  poly16x4_t __s0_37 = __p0_37; \
-  poly16x4_t __s2_37 = __p2_37; \
-  poly16x4_t __rev0_37;  __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 3, 2, 1, 0); \
-  poly16x4_t __rev2_37;  __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
-  poly16x4_t __ret_37; \
-  __ret_37 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
-  __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 3, 2, 1, 0); \
-  __ret_37; \
+#define vcopy_lane_p16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
+  poly16x4_t __s0_274 = __p0_274; \
+  poly16x4_t __s2_274 = __p2_274; \
+  poly16x4_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \
+  poly16x4_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 3, 2, 1, 0); \
+  poly16x4_t __ret_274; \
+  __ret_274 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_274, __p3_274), __rev0_274, __p1_274); \
+  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \
+  __ret_274; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
-  uint8x8_t __s0_38 = __p0_38; \
-  uint8x8_t __s2_38 = __p2_38; \
-  uint8x8_t __ret_38; \
-  __ret_38 = vset_lane_u8(vget_lane_u8(__s2_38, __p3_38), __s0_38, __p1_38); \
-  __ret_38; \
+#define vcopy_lane_u8(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
+  uint8x8_t __s0_275 = __p0_275; \
+  uint8x8_t __s2_275 = __p2_275; \
+  uint8x8_t __ret_275; \
+  __ret_275 = vset_lane_u8(vget_lane_u8(__s2_275, __p3_275), __s0_275, __p1_275); \
+  __ret_275; \
 })
 #else
-#define vcopy_lane_u8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
-  uint8x8_t __s0_39 = __p0_39; \
-  uint8x8_t __s2_39 = __p2_39; \
-  uint8x8_t __rev0_39;  __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_39;  __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_39; \
-  __ret_39 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
-  __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_39; \
+#define vcopy_lane_u8(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
+  uint8x8_t __s0_276 = __p0_276; \
+  uint8x8_t __s2_276 = __p2_276; \
+  uint8x8_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_276; \
+  __ret_276 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_276, __p3_276), __rev0_276, __p1_276); \
+  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_276; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
-  uint32x2_t __s0_40 = __p0_40; \
-  uint32x2_t __s2_40 = __p2_40; \
-  uint32x2_t __ret_40; \
-  __ret_40 = vset_lane_u32(vget_lane_u32(__s2_40, __p3_40), __s0_40, __p1_40); \
-  __ret_40; \
+#define vcopy_lane_u32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
+  uint32x2_t __s0_277 = __p0_277; \
+  uint32x2_t __s2_277 = __p2_277; \
+  uint32x2_t __ret_277; \
+  __ret_277 = vset_lane_u32(vget_lane_u32(__s2_277, __p3_277), __s0_277, __p1_277); \
+  __ret_277; \
 })
 #else
-#define vcopy_lane_u32(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
-  uint32x2_t __s0_41 = __p0_41; \
-  uint32x2_t __s2_41 = __p2_41; \
-  uint32x2_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 1, 0); \
-  uint32x2_t __rev2_41;  __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 1, 0); \
-  uint32x2_t __ret_41; \
-  __ret_41 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_41, __p3_41), __rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 1, 0); \
-  __ret_41; \
+#define vcopy_lane_u32(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
+  uint32x2_t __s0_278 = __p0_278; \
+  uint32x2_t __s2_278 = __p2_278; \
+  uint32x2_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 1, 0); \
+  uint32x2_t __rev2_278;  __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 1, 0); \
+  uint32x2_t __ret_278; \
+  __ret_278 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_278, __p3_278), __rev0_278, __p1_278); \
+  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 1, 0); \
+  __ret_278; \
 })
 #endif
 
-#define vcopy_lane_u64(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint64x1_t __s0_42 = __p0_42; \
-  uint64x1_t __s2_42 = __p2_42; \
-  uint64x1_t __ret_42; \
-  __ret_42 = vset_lane_u64(vget_lane_u64(__s2_42, __p3_42), __s0_42, __p1_42); \
-  __ret_42; \
+#define vcopy_lane_u64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
+  uint64x1_t __s0_279 = __p0_279; \
+  uint64x1_t __s2_279 = __p2_279; \
+  uint64x1_t __ret_279; \
+  __ret_279 = vset_lane_u64(vget_lane_u64(__s2_279, __p3_279), __s0_279, __p1_279); \
+  __ret_279; \
 })
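`vcopy_lane_u64` above (and `vcopy_lane_s64` further down) is defined once, outside any `__LITTLE_ENDIAN__` split: a `uint64x1_t` holds a single lane, so there is no lane order to reverse and one definition serves both byte orders.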
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint16x4_t __s0_43 = __p0_43; \
-  uint16x4_t __s2_43 = __p2_43; \
-  uint16x4_t __ret_43; \
-  __ret_43 = vset_lane_u16(vget_lane_u16(__s2_43, __p3_43), __s0_43, __p1_43); \
-  __ret_43; \
+#define vcopy_lane_u16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
+  uint16x4_t __s0_280 = __p0_280; \
+  uint16x4_t __s2_280 = __p2_280; \
+  uint16x4_t __ret_280; \
+  __ret_280 = vset_lane_u16(vget_lane_u16(__s2_280, __p3_280), __s0_280, __p1_280); \
+  __ret_280; \
 })
 #else
-#define vcopy_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x4_t __s0_44 = __p0_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x4_t __rev0_44;  __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 3, 2, 1, 0); \
-  uint16x4_t __rev2_44;  __rev2_44 = __builtin_shufflevector(__s2_44, __s2_44, 3, 2, 1, 0); \
-  uint16x4_t __ret_44; \
-  __ret_44 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_44, __p3_44), __rev0_44, __p1_44); \
-  __ret_44 = __builtin_shufflevector(__ret_44, __ret_44, 3, 2, 1, 0); \
-  __ret_44; \
+#define vcopy_lane_u16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
+  uint16x4_t __s0_281 = __p0_281; \
+  uint16x4_t __s2_281 = __p2_281; \
+  uint16x4_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 3, 2, 1, 0); \
+  uint16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
+  uint16x4_t __ret_281; \
+  __ret_281 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_281, __p3_281), __rev0_281, __p1_281); \
+  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 3, 2, 1, 0); \
+  __ret_281; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  int8x8_t __s0_45 = __p0_45; \
-  int8x8_t __s2_45 = __p2_45; \
-  int8x8_t __ret_45; \
-  __ret_45 = vset_lane_s8(vget_lane_s8(__s2_45, __p3_45), __s0_45, __p1_45); \
-  __ret_45; \
+#define vcopy_lane_s8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
+  int8x8_t __s0_282 = __p0_282; \
+  int8x8_t __s2_282 = __p2_282; \
+  int8x8_t __ret_282; \
+  __ret_282 = vset_lane_s8(vget_lane_s8(__s2_282, __p3_282), __s0_282, __p1_282); \
+  __ret_282; \
 })
 #else
-#define vcopy_lane_s8(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  int8x8_t __s0_46 = __p0_46; \
-  int8x8_t __s2_46 = __p2_46; \
-  int8x8_t __rev0_46;  __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_46;  __rev2_46 = __builtin_shufflevector(__s2_46, __s2_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_46; \
-  __ret_46 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_46, __p3_46), __rev0_46, __p1_46); \
-  __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_46; \
+#define vcopy_lane_s8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
+  int8x8_t __s0_283 = __p0_283; \
+  int8x8_t __s2_283 = __p2_283; \
+  int8x8_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_283; \
+  __ret_283 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_283, __p3_283), __rev0_283, __p1_283); \
+  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_283; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x2_t __s0_47 = __p0_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x2_t __ret_47; \
-  __ret_47 = vset_lane_f32(vget_lane_f32(__s2_47, __p3_47), __s0_47, __p1_47); \
-  __ret_47; \
+#define vcopy_lane_f32(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
+  float32x2_t __s0_284 = __p0_284; \
+  float32x2_t __s2_284 = __p2_284; \
+  float32x2_t __ret_284; \
+  __ret_284 = vset_lane_f32(vget_lane_f32(__s2_284, __p3_284), __s0_284, __p1_284); \
+  __ret_284; \
 })
 #else
-#define vcopy_lane_f32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  float32x2_t __s0_48 = __p0_48; \
-  float32x2_t __s2_48 = __p2_48; \
-  float32x2_t __rev0_48;  __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 1, 0); \
-  float32x2_t __rev2_48;  __rev2_48 = __builtin_shufflevector(__s2_48, __s2_48, 1, 0); \
-  float32x2_t __ret_48; \
-  __ret_48 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_48, __p3_48), __rev0_48, __p1_48); \
-  __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 1, 0); \
-  __ret_48; \
+#define vcopy_lane_f32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
+  float32x2_t __s0_285 = __p0_285; \
+  float32x2_t __s2_285 = __p2_285; \
+  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
+  float32x2_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 1, 0); \
+  float32x2_t __ret_285; \
+  __ret_285 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_285, __p3_285), __rev0_285, __p1_285); \
+  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
+  __ret_285; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x2_t __s0_49 = __p0_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x2_t __ret_49; \
-  __ret_49 = vset_lane_s32(vget_lane_s32(__s2_49, __p3_49), __s0_49, __p1_49); \
-  __ret_49; \
+#define vcopy_lane_s32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
+  int32x2_t __s0_286 = __p0_286; \
+  int32x2_t __s2_286 = __p2_286; \
+  int32x2_t __ret_286; \
+  __ret_286 = vset_lane_s32(vget_lane_s32(__s2_286, __p3_286), __s0_286, __p1_286); \
+  __ret_286; \
 })
 #else
-#define vcopy_lane_s32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int32x2_t __s0_50 = __p0_50; \
-  int32x2_t __s2_50 = __p2_50; \
-  int32x2_t __rev0_50;  __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \
-  int32x2_t __rev2_50;  __rev2_50 = __builtin_shufflevector(__s2_50, __s2_50, 1, 0); \
-  int32x2_t __ret_50; \
-  __ret_50 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_50, __p3_50), __rev0_50, __p1_50); \
-  __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \
-  __ret_50; \
+#define vcopy_lane_s32(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
+  int32x2_t __s0_287 = __p0_287; \
+  int32x2_t __s2_287 = __p2_287; \
+  int32x2_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \
+  int32x2_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 1, 0); \
+  int32x2_t __ret_287; \
+  __ret_287 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_287, __p3_287), __rev0_287, __p1_287); \
+  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \
+  __ret_287; \
 })
 #endif
 
-#define vcopy_lane_s64(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int64x1_t __s0_51 = __p0_51; \
-  int64x1_t __s2_51 = __p2_51; \
-  int64x1_t __ret_51; \
-  __ret_51 = vset_lane_s64(vget_lane_s64(__s2_51, __p3_51), __s0_51, __p1_51); \
-  __ret_51; \
+#define vcopy_lane_s64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
+  int64x1_t __s0_288 = __p0_288; \
+  int64x1_t __s2_288 = __p2_288; \
+  int64x1_t __ret_288; \
+  __ret_288 = vset_lane_s64(vget_lane_s64(__s2_288, __p3_288), __s0_288, __p1_288); \
+  __ret_288; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  int16x4_t __s0_52 = __p0_52; \
-  int16x4_t __s2_52 = __p2_52; \
-  int16x4_t __ret_52; \
-  __ret_52 = vset_lane_s16(vget_lane_s16(__s2_52, __p3_52), __s0_52, __p1_52); \
-  __ret_52; \
+#define vcopy_lane_s16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
+  int16x4_t __s0_289 = __p0_289; \
+  int16x4_t __s2_289 = __p2_289; \
+  int16x4_t __ret_289; \
+  __ret_289 = vset_lane_s16(vget_lane_s16(__s2_289, __p3_289), __s0_289, __p1_289); \
+  __ret_289; \
 })
 #else
-#define vcopy_lane_s16(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  int16x4_t __s0_53 = __p0_53; \
-  int16x4_t __s2_53 = __p2_53; \
-  int16x4_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \
-  int16x4_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 3, 2, 1, 0); \
-  int16x4_t __ret_53; \
-  __ret_53 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_53, __p3_53), __rev0_53, __p1_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \
-  __ret_53; \
+#define vcopy_lane_s16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
+  int16x4_t __s0_290 = __p0_290; \
+  int16x4_t __s2_290 = __p2_290; \
+  int16x4_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 3, 2, 1, 0); \
+  int16x4_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 3, 2, 1, 0); \
+  int16x4_t __ret_290; \
+  __ret_290 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_290, __p3_290), __rev0_290, __p1_290); \
+  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 3, 2, 1, 0); \
+  __ret_290; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  poly8x16_t __s0_54 = __p0_54; \
-  poly8x16_t __s2_54 = __p2_54; \
-  poly8x16_t __ret_54; \
-  __ret_54 = vsetq_lane_p8(vgetq_lane_p8(__s2_54, __p3_54), __s0_54, __p1_54); \
-  __ret_54; \
+#define vcopyq_laneq_p8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
+  poly8x16_t __s0_291 = __p0_291; \
+  poly8x16_t __s2_291 = __p2_291; \
+  poly8x16_t __ret_291; \
+  __ret_291 = vsetq_lane_p8(vgetq_lane_p8(__s2_291, __p3_291), __s0_291, __p1_291); \
+  __ret_291; \
 })
 #else
-#define vcopyq_laneq_p8(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  poly8x16_t __s0_55 = __p0_55; \
-  poly8x16_t __s2_55 = __p2_55; \
-  poly8x16_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_55; \
-  __ret_55 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_55, __p3_55), __rev0_55, __p1_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_55; \
+#define vcopyq_laneq_p8(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
+  poly8x16_t __s0_292 = __p0_292; \
+  poly8x16_t __s2_292 = __p2_292; \
+  poly8x16_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_292; \
+  __ret_292 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_292, __p3_292), __rev0_292, __p1_292); \
+  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_292; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  poly16x8_t __s0_56 = __p0_56; \
-  poly16x8_t __s2_56 = __p2_56; \
-  poly16x8_t __ret_56; \
-  __ret_56 = vsetq_lane_p16(vgetq_lane_p16(__s2_56, __p3_56), __s0_56, __p1_56); \
-  __ret_56; \
+#define vcopyq_laneq_p16(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
+  poly16x8_t __s0_293 = __p0_293; \
+  poly16x8_t __s2_293 = __p2_293; \
+  poly16x8_t __ret_293; \
+  __ret_293 = vsetq_lane_p16(vgetq_lane_p16(__s2_293, __p3_293), __s0_293, __p1_293); \
+  __ret_293; \
 })
 #else
-#define vcopyq_laneq_p16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  poly16x8_t __s0_57 = __p0_57; \
-  poly16x8_t __s2_57 = __p2_57; \
-  poly16x8_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_57; \
-  __ret_57 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_57, __p3_57), __rev0_57, __p1_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_57; \
+#define vcopyq_laneq_p16(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
+  poly16x8_t __s0_294 = __p0_294; \
+  poly16x8_t __s2_294 = __p2_294; \
+  poly16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __rev2_294;  __rev2_294 = __builtin_shufflevector(__s2_294, __s2_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_294; \
+  __ret_294 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_294, __p3_294), __rev0_294, __p1_294); \
+  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_294; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  uint8x16_t __s0_58 = __p0_58; \
-  uint8x16_t __s2_58 = __p2_58; \
-  uint8x16_t __ret_58; \
-  __ret_58 = vsetq_lane_u8(vgetq_lane_u8(__s2_58, __p3_58), __s0_58, __p1_58); \
-  __ret_58; \
+#define vcopyq_laneq_u8(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
+  uint8x16_t __s0_295 = __p0_295; \
+  uint8x16_t __s2_295 = __p2_295; \
+  uint8x16_t __ret_295; \
+  __ret_295 = vsetq_lane_u8(vgetq_lane_u8(__s2_295, __p3_295), __s0_295, __p1_295); \
+  __ret_295; \
 })
 #else
-#define vcopyq_laneq_u8(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  uint8x16_t __s0_59 = __p0_59; \
-  uint8x16_t __s2_59 = __p2_59; \
-  uint8x16_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_59; \
-  __ret_59 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_59, __p3_59), __rev0_59, __p1_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_59; \
+#define vcopyq_laneq_u8(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
+  uint8x16_t __s0_296 = __p0_296; \
+  uint8x16_t __s2_296 = __p2_296; \
+  uint8x16_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_296;  __rev2_296 = __builtin_shufflevector(__s2_296, __s2_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_296; \
+  __ret_296 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_296, __p3_296), __rev0_296, __p1_296); \
+  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_296; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  uint32x4_t __s0_60 = __p0_60; \
-  uint32x4_t __s2_60 = __p2_60; \
-  uint32x4_t __ret_60; \
-  __ret_60 = vsetq_lane_u32(vgetq_lane_u32(__s2_60, __p3_60), __s0_60, __p1_60); \
-  __ret_60; \
+#define vcopyq_laneq_u32(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
+  uint32x4_t __s0_297 = __p0_297; \
+  uint32x4_t __s2_297 = __p2_297; \
+  uint32x4_t __ret_297; \
+  __ret_297 = vsetq_lane_u32(vgetq_lane_u32(__s2_297, __p3_297), __s0_297, __p1_297); \
+  __ret_297; \
 })
 #else
-#define vcopyq_laneq_u32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  uint32x4_t __s0_61 = __p0_61; \
-  uint32x4_t __s2_61 = __p2_61; \
-  uint32x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  uint32x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  uint32x4_t __ret_61; \
-  __ret_61 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_61, __p3_61), __rev0_61, __p1_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
+#define vcopyq_laneq_u32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
+  uint32x4_t __s0_298 = __p0_298; \
+  uint32x4_t __s2_298 = __p2_298; \
+  uint32x4_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 3, 2, 1, 0); \
+  uint32x4_t __rev2_298;  __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \
+  uint32x4_t __ret_298; \
+  __ret_298 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_298, __p3_298), __rev0_298, __p1_298); \
+  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \
+  __ret_298; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint64x2_t __s0_62 = __p0_62; \
-  uint64x2_t __s2_62 = __p2_62; \
-  uint64x2_t __ret_62; \
-  __ret_62 = vsetq_lane_u64(vgetq_lane_u64(__s2_62, __p3_62), __s0_62, __p1_62); \
-  __ret_62; \
+#define vcopyq_laneq_u64(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
+  uint64x2_t __s0_299 = __p0_299; \
+  uint64x2_t __s2_299 = __p2_299; \
+  uint64x2_t __ret_299; \
+  __ret_299 = vsetq_lane_u64(vgetq_lane_u64(__s2_299, __p3_299), __s0_299, __p1_299); \
+  __ret_299; \
 })
 #else
-#define vcopyq_laneq_u64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint64x2_t __s0_63 = __p0_63; \
-  uint64x2_t __s2_63 = __p2_63; \
-  uint64x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
-  uint64x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint64x2_t __ret_63; \
-  __ret_63 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_63, __p3_63), __rev0_63, __p1_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
-  __ret_63; \
+#define vcopyq_laneq_u64(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
+  uint64x2_t __s0_300 = __p0_300; \
+  uint64x2_t __s2_300 = __p2_300; \
+  uint64x2_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 1, 0); \
+  uint64x2_t __rev2_300;  __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 1, 0); \
+  uint64x2_t __ret_300; \
+  __ret_300 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_300, __p3_300), __rev0_300, __p1_300); \
+  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 1, 0); \
+  __ret_300; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = vsetq_lane_u16(vgetq_lane_u16(__s2_64, __p3_64), __s0_64, __p1_64); \
-  __ret_64; \
+#define vcopyq_laneq_u16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
+  uint16x8_t __s0_301 = __p0_301; \
+  uint16x8_t __s2_301 = __p2_301; \
+  uint16x8_t __ret_301; \
+  __ret_301 = vsetq_lane_u16(vgetq_lane_u16(__s2_301, __p3_301), __s0_301, __p1_301); \
+  __ret_301; \
 })
 #else
-#define vcopyq_laneq_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_65, __p3_65), __rev0_65, __p1_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
+#define vcopyq_laneq_u16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
+  uint16x8_t __s0_302 = __p0_302; \
+  uint16x8_t __s2_302 = __p2_302; \
+  uint16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_302;  __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_302; \
+  __ret_302 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_302, __p3_302), __rev0_302, __p1_302); \
+  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_302; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  int8x16_t __s0_66 = __p0_66; \
-  int8x16_t __s2_66 = __p2_66; \
-  int8x16_t __ret_66; \
-  __ret_66 = vsetq_lane_s8(vgetq_lane_s8(__s2_66, __p3_66), __s0_66, __p1_66); \
-  __ret_66; \
+#define vcopyq_laneq_s8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
+  int8x16_t __s0_303 = __p0_303; \
+  int8x16_t __s2_303 = __p2_303; \
+  int8x16_t __ret_303; \
+  __ret_303 = vsetq_lane_s8(vgetq_lane_s8(__s2_303, __p3_303), __s0_303, __p1_303); \
+  __ret_303; \
 })
 #else
-#define vcopyq_laneq_s8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  int8x16_t __s0_67 = __p0_67; \
-  int8x16_t __s2_67 = __p2_67; \
-  int8x16_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_67; \
-  __ret_67 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_67, __p3_67), __rev0_67, __p1_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_67; \
+#define vcopyq_laneq_s8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
+  int8x16_t __s0_304 = __p0_304; \
+  int8x16_t __s2_304 = __p2_304; \
+  int8x16_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_304;  __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_304; \
+  __ret_304 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_304, __p3_304), __rev0_304, __p1_304); \
+  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_304; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  float32x4_t __s0_68 = __p0_68; \
-  float32x4_t __s2_68 = __p2_68; \
-  float32x4_t __ret_68; \
-  __ret_68 = vsetq_lane_f32(vgetq_lane_f32(__s2_68, __p3_68), __s0_68, __p1_68); \
-  __ret_68; \
+#define vcopyq_laneq_f32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
+  float32x4_t __s0_305 = __p0_305; \
+  float32x4_t __s2_305 = __p2_305; \
+  float32x4_t __ret_305; \
+  __ret_305 = vsetq_lane_f32(vgetq_lane_f32(__s2_305, __p3_305), __s0_305, __p1_305); \
+  __ret_305; \
 })
 #else
-#define vcopyq_laneq_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  float32x4_t __s0_69 = __p0_69; \
-  float32x4_t __s2_69 = __p2_69; \
-  float32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  float32x4_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \
-  float32x4_t __ret_69; \
-  __ret_69 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_69, __p3_69), __rev0_69, __p1_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
+#define vcopyq_laneq_f32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
+  float32x4_t __s0_306 = __p0_306; \
+  float32x4_t __s2_306 = __p2_306; \
+  float32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
+  float32x4_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \
+  float32x4_t __ret_306; \
+  __ret_306 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_306, __p3_306), __rev0_306, __p1_306); \
+  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
+  __ret_306; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int32x4_t __s0_70 = __p0_70; \
-  int32x4_t __s2_70 = __p2_70; \
-  int32x4_t __ret_70; \
-  __ret_70 = vsetq_lane_s32(vgetq_lane_s32(__s2_70, __p3_70), __s0_70, __p1_70); \
-  __ret_70; \
+#define vcopyq_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
+  int32x4_t __s0_307 = __p0_307; \
+  int32x4_t __s2_307 = __p2_307; \
+  int32x4_t __ret_307; \
+  __ret_307 = vsetq_lane_s32(vgetq_lane_s32(__s2_307, __p3_307), __s0_307, __p1_307); \
+  __ret_307; \
 })
 #else
-#define vcopyq_laneq_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int32x4_t __s0_71 = __p0_71; \
-  int32x4_t __s2_71 = __p2_71; \
-  int32x4_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
-  int32x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int32x4_t __ret_71; \
-  __ret_71 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_71, __p3_71), __rev0_71, __p1_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
-  __ret_71; \
+#define vcopyq_laneq_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
+  int32x4_t __s0_308 = __p0_308; \
+  int32x4_t __s2_308 = __p2_308; \
+  int32x4_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 3, 2, 1, 0); \
+  int32x4_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 3, 2, 1, 0); \
+  int32x4_t __ret_308; \
+  __ret_308 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_308, __p3_308), __rev0_308, __p1_308); \
+  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 3, 2, 1, 0); \
+  __ret_308; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  int64x2_t __s0_72 = __p0_72; \
-  int64x2_t __s2_72 = __p2_72; \
-  int64x2_t __ret_72; \
-  __ret_72 = vsetq_lane_s64(vgetq_lane_s64(__s2_72, __p3_72), __s0_72, __p1_72); \
-  __ret_72; \
+#define vcopyq_laneq_s64(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
+  int64x2_t __s0_309 = __p0_309; \
+  int64x2_t __s2_309 = __p2_309; \
+  int64x2_t __ret_309; \
+  __ret_309 = vsetq_lane_s64(vgetq_lane_s64(__s2_309, __p3_309), __s0_309, __p1_309); \
+  __ret_309; \
 })
 #else
-#define vcopyq_laneq_s64(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  int64x2_t __s0_73 = __p0_73; \
-  int64x2_t __s2_73 = __p2_73; \
-  int64x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  int64x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  int64x2_t __ret_73; \
-  __ret_73 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_73, __p3_73), __rev0_73, __p1_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
+#define vcopyq_laneq_s64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
+  int64x2_t __s0_310 = __p0_310; \
+  int64x2_t __s2_310 = __p2_310; \
+  int64x2_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 1, 0); \
+  int64x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
+  int64x2_t __ret_310; \
+  __ret_310 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_310, __p3_310), __rev0_310, __p1_310); \
+  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 1, 0); \
+  __ret_310; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  int16x8_t __s0_74 = __p0_74; \
-  int16x8_t __s2_74 = __p2_74; \
-  int16x8_t __ret_74; \
-  __ret_74 = vsetq_lane_s16(vgetq_lane_s16(__s2_74, __p3_74), __s0_74, __p1_74); \
-  __ret_74; \
+#define vcopyq_laneq_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
+  int16x8_t __s0_311 = __p0_311; \
+  int16x8_t __s2_311 = __p2_311; \
+  int16x8_t __ret_311; \
+  __ret_311 = vsetq_lane_s16(vgetq_lane_s16(__s2_311, __p3_311), __s0_311, __p1_311); \
+  __ret_311; \
 })
 #else
-#define vcopyq_laneq_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  int16x8_t __s0_75 = __p0_75; \
-  int16x8_t __s2_75 = __p2_75; \
-  int16x8_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_75; \
-  __ret_75 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_75, __p3_75), __rev0_75, __p1_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_75; \
+#define vcopyq_laneq_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
+  int16x8_t __s0_312 = __p0_312; \
+  int16x8_t __s2_312 = __p2_312; \
+  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_312; \
+  __ret_312 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_312, __p3_312), __rev0_312, __p1_312); \
+  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_312; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  poly8x8_t __s0_76 = __p0_76; \
-  poly8x16_t __s2_76 = __p2_76; \
-  poly8x8_t __ret_76; \
-  __ret_76 = vset_lane_p8(vgetq_lane_p8(__s2_76, __p3_76), __s0_76, __p1_76); \
-  __ret_76; \
+#define vcopy_laneq_p8(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
+  poly8x8_t __s0_313 = __p0_313; \
+  poly8x16_t __s2_313 = __p2_313; \
+  poly8x8_t __ret_313; \
+  __ret_313 = vset_lane_p8(vgetq_lane_p8(__s2_313, __p3_313), __s0_313, __p1_313); \
+  __ret_313; \
 })
 #else
-#define vcopy_laneq_p8(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  poly8x8_t __s0_77 = __p0_77; \
-  poly8x16_t __s2_77 = __p2_77; \
-  poly8x8_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_77; \
-  __ret_77 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_77, __p3_77), __rev0_77, __p1_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_77; \
+#define vcopy_laneq_p8(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
+  poly8x8_t __s0_314 = __p0_314; \
+  poly8x16_t __s2_314 = __p2_314; \
+  poly8x8_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_314; \
+  __ret_314 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_314, __p3_314), __rev0_314, __p1_314); \
+  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_314; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  poly16x4_t __s0_78 = __p0_78; \
-  poly16x8_t __s2_78 = __p2_78; \
-  poly16x4_t __ret_78; \
-  __ret_78 = vset_lane_p16(vgetq_lane_p16(__s2_78, __p3_78), __s0_78, __p1_78); \
-  __ret_78; \
+#define vcopy_laneq_p16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
+  poly16x4_t __s0_315 = __p0_315; \
+  poly16x8_t __s2_315 = __p2_315; \
+  poly16x4_t __ret_315; \
+  __ret_315 = vset_lane_p16(vgetq_lane_p16(__s2_315, __p3_315), __s0_315, __p1_315); \
+  __ret_315; \
 })
 #else
-#define vcopy_laneq_p16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  poly16x4_t __s0_79 = __p0_79; \
-  poly16x8_t __s2_79 = __p2_79; \
-  poly16x4_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \
-  poly16x8_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_79; \
-  __ret_79 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_79, __p3_79), __rev0_79, __p1_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \
-  __ret_79; \
+#define vcopy_laneq_p16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
+  poly16x4_t __s0_316 = __p0_316; \
+  poly16x8_t __s2_316 = __p2_316; \
+  poly16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
+  poly16x8_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_316; \
+  __ret_316 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_316, __p3_316), __rev0_316, __p1_316); \
+  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
+  __ret_316; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  uint8x8_t __s0_80 = __p0_80; \
-  uint8x16_t __s2_80 = __p2_80; \
-  uint8x8_t __ret_80; \
-  __ret_80 = vset_lane_u8(vgetq_lane_u8(__s2_80, __p3_80), __s0_80, __p1_80); \
-  __ret_80; \
+#define vcopy_laneq_u8(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
+  uint8x8_t __s0_317 = __p0_317; \
+  uint8x16_t __s2_317 = __p2_317; \
+  uint8x8_t __ret_317; \
+  __ret_317 = vset_lane_u8(vgetq_lane_u8(__s2_317, __p3_317), __s0_317, __p1_317); \
+  __ret_317; \
 })
 #else
-#define vcopy_laneq_u8(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  uint8x8_t __s0_81 = __p0_81; \
-  uint8x16_t __s2_81 = __p2_81; \
-  uint8x8_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_81; \
-  __ret_81 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_81, __p3_81), __rev0_81, __p1_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_81; \
+#define vcopy_laneq_u8(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
+  uint8x8_t __s0_318 = __p0_318; \
+  uint8x16_t __s2_318 = __p2_318; \
+  uint8x8_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_318; \
+  __ret_318 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_318, __p3_318), __rev0_318, __p1_318); \
+  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_318; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
-  uint32x2_t __s0_82 = __p0_82; \
-  uint32x4_t __s2_82 = __p2_82; \
-  uint32x2_t __ret_82; \
-  __ret_82 = vset_lane_u32(vgetq_lane_u32(__s2_82, __p3_82), __s0_82, __p1_82); \
-  __ret_82; \
+#define vcopy_laneq_u32(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
+  uint32x2_t __s0_319 = __p0_319; \
+  uint32x4_t __s2_319 = __p2_319; \
+  uint32x2_t __ret_319; \
+  __ret_319 = vset_lane_u32(vgetq_lane_u32(__s2_319, __p3_319), __s0_319, __p1_319); \
+  __ret_319; \
 })
 #else
-#define vcopy_laneq_u32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
-  uint32x2_t __s0_83 = __p0_83; \
-  uint32x4_t __s2_83 = __p2_83; \
-  uint32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
-  uint32x4_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \
-  uint32x2_t __ret_83; \
-  __ret_83 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_83, __p3_83), __rev0_83, __p1_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
-  __ret_83; \
+#define vcopy_laneq_u32(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
+  uint32x2_t __s0_320 = __p0_320; \
+  uint32x4_t __s2_320 = __p2_320; \
+  uint32x2_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 1, 0); \
+  uint32x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
+  uint32x2_t __ret_320; \
+  __ret_320 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_320, __p3_320), __rev0_320, __p1_320); \
+  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 1, 0); \
+  __ret_320; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
-  uint64x1_t __s0_84 = __p0_84; \
-  uint64x2_t __s2_84 = __p2_84; \
-  uint64x1_t __ret_84; \
-  __ret_84 = vset_lane_u64(vgetq_lane_u64(__s2_84, __p3_84), __s0_84, __p1_84); \
-  __ret_84; \
+#define vcopy_laneq_u64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
+  uint64x1_t __s0_321 = __p0_321; \
+  uint64x2_t __s2_321 = __p2_321; \
+  uint64x1_t __ret_321; \
+  __ret_321 = vset_lane_u64(vgetq_lane_u64(__s2_321, __p3_321), __s0_321, __p1_321); \
+  __ret_321; \
 })
 #else
-#define vcopy_laneq_u64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
-  uint64x1_t __s0_85 = __p0_85; \
-  uint64x2_t __s2_85 = __p2_85; \
-  uint64x2_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \
-  uint64x1_t __ret_85; \
-  __ret_85 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_85, __p3_85), __s0_85, __p1_85); \
-  __ret_85; \
+#define vcopy_laneq_u64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
+  uint64x1_t __s0_322 = __p0_322; \
+  uint64x2_t __s2_322 = __p2_322; \
+  uint64x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
+  uint64x1_t __ret_322; \
+  __ret_322 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_322, __p3_322), __s0_322, __p1_322); \
+  __ret_322; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
-  uint16x4_t __s0_86 = __p0_86; \
-  uint16x8_t __s2_86 = __p2_86; \
-  uint16x4_t __ret_86; \
-  __ret_86 = vset_lane_u16(vgetq_lane_u16(__s2_86, __p3_86), __s0_86, __p1_86); \
-  __ret_86; \
+#define vcopy_laneq_u16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
+  uint16x4_t __s0_323 = __p0_323; \
+  uint16x8_t __s2_323 = __p2_323; \
+  uint16x4_t __ret_323; \
+  __ret_323 = vset_lane_u16(vgetq_lane_u16(__s2_323, __p3_323), __s0_323, __p1_323); \
+  __ret_323; \
 })
 #else
-#define vcopy_laneq_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
-  uint16x4_t __s0_87 = __p0_87; \
-  uint16x8_t __s2_87 = __p2_87; \
-  uint16x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  uint16x8_t __rev2_87;  __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_87; \
-  __ret_87 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_87, __p3_87), __rev0_87, __p1_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
+#define vcopy_laneq_u16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
+  uint16x4_t __s0_324 = __p0_324; \
+  uint16x8_t __s2_324 = __p2_324; \
+  uint16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
+  uint16x8_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_324; \
+  __ret_324 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_324, __p3_324), __rev0_324, __p1_324); \
+  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
+  __ret_324; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
-  int8x8_t __s0_88 = __p0_88; \
-  int8x16_t __s2_88 = __p2_88; \
-  int8x8_t __ret_88; \
-  __ret_88 = vset_lane_s8(vgetq_lane_s8(__s2_88, __p3_88), __s0_88, __p1_88); \
-  __ret_88; \
+#define vcopy_laneq_s8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
+  int8x8_t __s0_325 = __p0_325; \
+  int8x16_t __s2_325 = __p2_325; \
+  int8x8_t __ret_325; \
+  __ret_325 = vset_lane_s8(vgetq_lane_s8(__s2_325, __p3_325), __s0_325, __p1_325); \
+  __ret_325; \
 })
 #else
-#define vcopy_laneq_s8(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
-  int8x8_t __s0_89 = __p0_89; \
-  int8x16_t __s2_89 = __p2_89; \
-  int8x8_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_89;  __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_89; \
-  __ret_89 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_89, __p3_89), __rev0_89, __p1_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_89; \
+#define vcopy_laneq_s8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
+  int8x8_t __s0_326 = __p0_326; \
+  int8x16_t __s2_326 = __p2_326; \
+  int8x8_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_326; \
+  __ret_326 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_326, __p3_326), __rev0_326, __p1_326); \
+  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_326; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
-  float32x2_t __s0_90 = __p0_90; \
-  float32x4_t __s2_90 = __p2_90; \
-  float32x2_t __ret_90; \
-  __ret_90 = vset_lane_f32(vgetq_lane_f32(__s2_90, __p3_90), __s0_90, __p1_90); \
-  __ret_90; \
+#define vcopy_laneq_f32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
+  float32x2_t __s0_327 = __p0_327; \
+  float32x4_t __s2_327 = __p2_327; \
+  float32x2_t __ret_327; \
+  __ret_327 = vset_lane_f32(vgetq_lane_f32(__s2_327, __p3_327), __s0_327, __p1_327); \
+  __ret_327; \
 })
 #else
-#define vcopy_laneq_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
-  float32x2_t __s0_91 = __p0_91; \
-  float32x4_t __s2_91 = __p2_91; \
-  float32x2_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \
-  float32x4_t __rev2_91;  __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 3, 2, 1, 0); \
-  float32x2_t __ret_91; \
-  __ret_91 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_91, __p3_91), __rev0_91, __p1_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \
-  __ret_91; \
+#define vcopy_laneq_f32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
+  float32x2_t __s0_328 = __p0_328; \
+  float32x4_t __s2_328 = __p2_328; \
+  float32x2_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 1, 0); \
+  float32x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
+  float32x2_t __ret_328; \
+  __ret_328 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_328, __p3_328), __rev0_328, __p1_328); \
+  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \
+  __ret_328; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
-  int32x2_t __s0_92 = __p0_92; \
-  int32x4_t __s2_92 = __p2_92; \
-  int32x2_t __ret_92; \
-  __ret_92 = vset_lane_s32(vgetq_lane_s32(__s2_92, __p3_92), __s0_92, __p1_92); \
-  __ret_92; \
+#define vcopy_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
+  int32x2_t __s0_329 = __p0_329; \
+  int32x4_t __s2_329 = __p2_329; \
+  int32x2_t __ret_329; \
+  __ret_329 = vset_lane_s32(vgetq_lane_s32(__s2_329, __p3_329), __s0_329, __p1_329); \
+  __ret_329; \
 })
 #else
-#define vcopy_laneq_s32(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
-  int32x2_t __s0_93 = __p0_93; \
-  int32x4_t __s2_93 = __p2_93; \
-  int32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  int32x4_t __rev2_93;  __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \
-  int32x2_t __ret_93; \
-  __ret_93 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_93, __p3_93), __rev0_93, __p1_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
+#define vcopy_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
+  int32x2_t __s0_330 = __p0_330; \
+  int32x4_t __s2_330 = __p2_330; \
+  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
+  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
+  int32x2_t __ret_330; \
+  __ret_330 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_330, __p3_330), __rev0_330, __p1_330); \
+  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
+  __ret_330; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
-  int64x1_t __s0_94 = __p0_94; \
-  int64x2_t __s2_94 = __p2_94; \
-  int64x1_t __ret_94; \
-  __ret_94 = vset_lane_s64(vgetq_lane_s64(__s2_94, __p3_94), __s0_94, __p1_94); \
-  __ret_94; \
+#define vcopy_laneq_s64(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
+  int64x1_t __s0_331 = __p0_331; \
+  int64x2_t __s2_331 = __p2_331; \
+  int64x1_t __ret_331; \
+  __ret_331 = vset_lane_s64(vgetq_lane_s64(__s2_331, __p3_331), __s0_331, __p1_331); \
+  __ret_331; \
 })
 #else
-#define vcopy_laneq_s64(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
-  int64x1_t __s0_95 = __p0_95; \
-  int64x2_t __s2_95 = __p2_95; \
-  int64x2_t __rev2_95;  __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \
-  int64x1_t __ret_95; \
-  __ret_95 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_95, __p3_95), __s0_95, __p1_95); \
-  __ret_95; \
+#define vcopy_laneq_s64(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
+  int64x1_t __s0_332 = __p0_332; \
+  int64x2_t __s2_332 = __p2_332; \
+  int64x2_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 1, 0); \
+  int64x1_t __ret_332; \
+  __ret_332 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_332, __p3_332), __s0_332, __p1_332); \
+  __ret_332; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
-  int16x4_t __s0_96 = __p0_96; \
-  int16x8_t __s2_96 = __p2_96; \
-  int16x4_t __ret_96; \
-  __ret_96 = vset_lane_s16(vgetq_lane_s16(__s2_96, __p3_96), __s0_96, __p1_96); \
-  __ret_96; \
+#define vcopy_laneq_s16(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
+  int16x4_t __s0_333 = __p0_333; \
+  int16x8_t __s2_333 = __p2_333; \
+  int16x4_t __ret_333; \
+  __ret_333 = vset_lane_s16(vgetq_lane_s16(__s2_333, __p3_333), __s0_333, __p1_333); \
+  __ret_333; \
 })
 #else
-#define vcopy_laneq_s16(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
-  int16x4_t __s0_97 = __p0_97; \
-  int16x8_t __s2_97 = __p2_97; \
-  int16x4_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 3, 2, 1, 0); \
-  int16x8_t __rev2_97;  __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_97; \
-  __ret_97 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_97, __p3_97), __rev0_97, __p1_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 3, 2, 1, 0); \
-  __ret_97; \
+#define vcopy_laneq_s16(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
+  int16x4_t __s0_334 = __p0_334; \
+  int16x8_t __s2_334 = __p2_334; \
+  int16x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
+  int16x8_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_334; \
+  __ret_334 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_334, __p3_334), __rev0_334, __p1_334); \
+  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
+  __ret_334; \
 })
 #endif
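The vcopyq_laneq_*/vcopy_laneq_* hunks above only renumber the per-expansion suffix on the macro locals (the _62.._97 names become _299.._334) so that each expansion keeps unique identifiers; the lane-copy semantics are unchanged. A minimal usage sketch of one of these macros, assuming an AArch64 target where <arm_neon.h> provides them (the function name and lane choices below are illustrative, not from the patch):

#include <arm_neon.h>

/* Illustrative only: copy lane 1 of src into lane 0 of dst, leaving the
   other lane of dst intact. Per the macro body above, this expands to
   vsetq_lane_u64(vgetq_lane_u64(src, 1), dst, 0) on little-endian. */
static uint64x2_t copy_src_hi_to_dst_lo(uint64x2_t dst, uint64x2_t src) {
  return vcopyq_laneq_u64(dst, 0, src, 1);
}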
 
@@ -45209,85 +49009,85 @@
 })
 #endif
 
-#define vdup_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_p64(__p0_335, __p1_335) __extension__ ({ \
+  poly64x1_t __s0_335 = __p0_335; \
+  poly64x1_t __ret_335; \
+  __ret_335 = splat_lane_p64(__s0_335, __p1_335); \
+  __ret_335; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_p64(__p0_336, __p1_336) __extension__ ({ \
+  poly64x1_t __s0_336 = __p0_336; \
+  poly64x2_t __ret_336; \
+  __ret_336 = splatq_lane_p64(__s0_336, __p1_336); \
+  __ret_336; \
 })
 #else
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_p64(__p0_337, __p1_337) __extension__ ({ \
+  poly64x1_t __s0_337 = __p0_337; \
+  poly64x2_t __ret_337; \
+  __ret_337 = __noswap_splatq_lane_p64(__s0_337, __p1_337); \
+  __ret_337 = __builtin_shufflevector(__ret_337, __ret_337, 1, 0); \
+  __ret_337; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f64(__p0_338, __p1_338) __extension__ ({ \
+  float64x1_t __s0_338 = __p0_338; \
+  float64x2_t __ret_338; \
+  __ret_338 = splatq_lane_f64(__s0_338, __p1_338); \
+  __ret_338; \
 })
 #else
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_lane_f64(__p0_339, __p1_339) __extension__ ({ \
+  float64x1_t __s0_339 = __p0_339; \
+  float64x2_t __ret_339; \
+  __ret_339 = __noswap_splatq_lane_f64(__s0_339, __p1_339); \
+  __ret_339 = __builtin_shufflevector(__ret_339, __ret_339, 1, 0); \
+  __ret_339; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_lane_f16(__p0_340, __p1_340) __extension__ ({ \
+  float16x4_t __s0_340 = __p0_340; \
+  float16x8_t __ret_340; \
+  __ret_340 = splatq_lane_f16(__s0_340, __p1_340); \
+  __ret_340; \
 })
 #else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_lane_f16(__p0_341, __p1_341) __extension__ ({ \
+  float16x4_t __s0_341 = __p0_341; \
+  float16x4_t __rev0_341;  __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, 3, 2, 1, 0); \
+  float16x8_t __ret_341; \
+  __ret_341 = __noswap_splatq_lane_f16(__rev0_341, __p1_341); \
+  __ret_341 = __builtin_shufflevector(__ret_341, __ret_341, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_341; \
 })
 #endif
 
-#define vdup_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_lane_f64(__p0_342, __p1_342) __extension__ ({ \
+  float64x1_t __s0_342 = __p0_342; \
+  float64x1_t __ret_342; \
+  __ret_342 = splat_lane_f64(__s0_342, __p1_342); \
+  __ret_342; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_lane_f16(__p0_343, __p1_343) __extension__ ({ \
+  float16x4_t __s0_343 = __p0_343; \
+  float16x4_t __ret_343; \
+  __ret_343 = splat_lane_f16(__s0_343, __p1_343); \
+  __ret_343; \
 })
 #else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_lane_f16(__p0_344, __p1_344) __extension__ ({ \
+  float16x4_t __s0_344 = __p0_344; \
+  float16x4_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 3, 2, 1, 0); \
+  float16x4_t __ret_344; \
+  __ret_344 = __noswap_splat_lane_f16(__rev0_344, __p1_344); \
+  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 3, 2, 1, 0); \
+  __ret_344; \
 })
 #endif
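From this point on, the vdup_lane*/vdupq_lane* macros (and, in the next hunk, the *_laneq forms) stop open-coding the broadcast with __builtin_shufflevector and instead call the splat_lane_*/splatq_lane_* helpers, with __noswap_ variants on the big-endian paths, which this version of the header defines earlier; the observable result is still a broadcast of one source lane. A hedged sketch of what a call computes, again assuming AArch64 <arm_neon.h> (the wrapper name is illustrative):

#include <arm_neon.h>

/* Illustrative only: every lane of the result holds v[2]. After this
   change the little-endian macro body is splatq_laneq_f32(v, 2). */
static float32x4_t broadcast_lane_2(float32x4_t v) {
  return vdupq_laneq_f32(v, 2);
}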
 
@@ -45496,502 +49296,502 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_p8(__p0_345, __p1_345) __extension__ ({ \
+  poly8x16_t __s0_345 = __p0_345; \
+  poly8x8_t __ret_345; \
+  __ret_345 = splat_laneq_p8(__s0_345, __p1_345); \
+  __ret_345; \
 })
 #else
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_p8(__p0_346, __p1_346) __extension__ ({ \
+  poly8x16_t __s0_346 = __p0_346; \
+  poly8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x8_t __ret_346; \
+  __ret_346 = __noswap_splat_laneq_p8(__rev0_346, __p1_346); \
+  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_346; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_p64(__p0_347, __p1_347) __extension__ ({ \
+  poly64x2_t __s0_347 = __p0_347; \
+  poly64x1_t __ret_347; \
+  __ret_347 = splat_laneq_p64(__s0_347, __p1_347); \
+  __ret_347; \
 })
 #else
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_p64(__p0_348, __p1_348) __extension__ ({ \
+  poly64x2_t __s0_348 = __p0_348; \
+  poly64x2_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 1, 0); \
+  poly64x1_t __ret_348; \
+  __ret_348 = __noswap_splat_laneq_p64(__rev0_348, __p1_348); \
+  __ret_348; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_p16(__p0_349, __p1_349) __extension__ ({ \
+  poly16x8_t __s0_349 = __p0_349; \
+  poly16x4_t __ret_349; \
+  __ret_349 = splat_laneq_p16(__s0_349, __p1_349); \
+  __ret_349; \
 })
 #else
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_p16(__p0_350, __p1_350) __extension__ ({ \
+  poly16x8_t __s0_350 = __p0_350; \
+  poly16x8_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x4_t __ret_350; \
+  __ret_350 = __noswap_splat_laneq_p16(__rev0_350, __p1_350); \
+  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
+  __ret_350; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_p8(__p0_351, __p1_351) __extension__ ({ \
+  poly8x16_t __s0_351 = __p0_351; \
+  poly8x16_t __ret_351; \
+  __ret_351 = splatq_laneq_p8(__s0_351, __p1_351); \
+  __ret_351; \
 })
 #else
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_p8(__p0_352, __p1_352) __extension__ ({ \
+  poly8x16_t __s0_352 = __p0_352; \
+  poly8x16_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly8x16_t __ret_352; \
+  __ret_352 = __noswap_splatq_laneq_p8(__rev0_352, __p1_352); \
+  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_352; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_p64(__p0_353, __p1_353) __extension__ ({ \
+  poly64x2_t __s0_353 = __p0_353; \
+  poly64x2_t __ret_353; \
+  __ret_353 = splatq_laneq_p64(__s0_353, __p1_353); \
+  __ret_353; \
 })
 #else
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_p64(__p0_354, __p1_354) __extension__ ({ \
+  poly64x2_t __s0_354 = __p0_354; \
+  poly64x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
+  poly64x2_t __ret_354; \
+  __ret_354 = __noswap_splatq_laneq_p64(__rev0_354, __p1_354); \
+  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
+  __ret_354; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_p16(__p0_355, __p1_355) __extension__ ({ \
+  poly16x8_t __s0_355 = __p0_355; \
+  poly16x8_t __ret_355; \
+  __ret_355 = splatq_laneq_p16(__s0_355, __p1_355); \
+  __ret_355; \
 })
 #else
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_p16(__p0_356, __p1_356) __extension__ ({ \
+  poly16x8_t __s0_356 = __p0_356; \
+  poly16x8_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  poly16x8_t __ret_356; \
+  __ret_356 = __noswap_splatq_laneq_p16(__rev0_356, __p1_356); \
+  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_356; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u8(__p0_357, __p1_357) __extension__ ({ \
+  uint8x16_t __s0_357 = __p0_357; \
+  uint8x16_t __ret_357; \
+  __ret_357 = splatq_laneq_u8(__s0_357, __p1_357); \
+  __ret_357; \
 })
 #else
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u8(__p0_358, __p1_358) __extension__ ({ \
+  uint8x16_t __s0_358 = __p0_358; \
+  uint8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_358; \
+  __ret_358 = __noswap_splatq_laneq_u8(__rev0_358, __p1_358); \
+  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_358; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u32(__p0_359, __p1_359) __extension__ ({ \
+  uint32x4_t __s0_359 = __p0_359; \
+  uint32x4_t __ret_359; \
+  __ret_359 = splatq_laneq_u32(__s0_359, __p1_359); \
+  __ret_359; \
 })
 #else
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u32(__p0_360, __p1_360) __extension__ ({ \
+  uint32x4_t __s0_360 = __p0_360; \
+  uint32x4_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 3, 2, 1, 0); \
+  uint32x4_t __ret_360; \
+  __ret_360 = __noswap_splatq_laneq_u32(__rev0_360, __p1_360); \
+  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 3, 2, 1, 0); \
+  __ret_360; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u64(__p0_361, __p1_361) __extension__ ({ \
+  uint64x2_t __s0_361 = __p0_361; \
+  uint64x2_t __ret_361; \
+  __ret_361 = splatq_laneq_u64(__s0_361, __p1_361); \
+  __ret_361; \
 })
 #else
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u64(__p0_362, __p1_362) __extension__ ({ \
+  uint64x2_t __s0_362 = __p0_362; \
+  uint64x2_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 1, 0); \
+  uint64x2_t __ret_362; \
+  __ret_362 = __noswap_splatq_laneq_u64(__rev0_362, __p1_362); \
+  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 1, 0); \
+  __ret_362; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_u16(__p0_363, __p1_363) __extension__ ({ \
+  uint16x8_t __s0_363 = __p0_363; \
+  uint16x8_t __ret_363; \
+  __ret_363 = splatq_laneq_u16(__s0_363, __p1_363); \
+  __ret_363; \
 })
 #else
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_u16(__p0_364, __p1_364) __extension__ ({ \
+  uint16x8_t __s0_364 = __p0_364; \
+  uint16x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_364; \
+  __ret_364 = __noswap_splatq_laneq_u16(__rev0_364, __p1_364); \
+  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_364; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s8(__p0_365, __p1_365) __extension__ ({ \
+  int8x16_t __s0_365 = __p0_365; \
+  int8x16_t __ret_365; \
+  __ret_365 = splatq_laneq_s8(__s0_365, __p1_365); \
+  __ret_365; \
 })
 #else
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s8(__p0_366, __p1_366) __extension__ ({ \
+  int8x16_t __s0_366 = __p0_366; \
+  int8x16_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_366; \
+  __ret_366 = __noswap_splatq_laneq_s8(__rev0_366, __p1_366); \
+  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_366; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_f64(__p0_367, __p1_367) __extension__ ({ \
+  float64x2_t __s0_367 = __p0_367; \
+  float64x2_t __ret_367; \
+  __ret_367 = splatq_laneq_f64(__s0_367, __p1_367); \
+  __ret_367; \
 })
 #else
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_f64(__p0_368, __p1_368) __extension__ ({ \
+  float64x2_t __s0_368 = __p0_368; \
+  float64x2_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 1, 0); \
+  float64x2_t __ret_368; \
+  __ret_368 = __noswap_splatq_laneq_f64(__rev0_368, __p1_368); \
+  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 1, 0); \
+  __ret_368; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_f32(__p0_369, __p1_369) __extension__ ({ \
+  float32x4_t __s0_369 = __p0_369; \
+  float32x4_t __ret_369; \
+  __ret_369 = splatq_laneq_f32(__s0_369, __p1_369); \
+  __ret_369; \
 })
 #else
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_f32(__p0_370, __p1_370) __extension__ ({ \
+  float32x4_t __s0_370 = __p0_370; \
+  float32x4_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 3, 2, 1, 0); \
+  float32x4_t __ret_370; \
+  __ret_370 = __noswap_splatq_laneq_f32(__rev0_370, __p1_370); \
+  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 3, 2, 1, 0); \
+  __ret_370; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_f16(__p0_371, __p1_371) __extension__ ({ \
+  float16x8_t __s0_371 = __p0_371; \
+  float16x8_t __ret_371; \
+  __ret_371 = splatq_laneq_f16(__s0_371, __p1_371); \
+  __ret_371; \
 })
 #else
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_f16(__p0_372, __p1_372) __extension__ ({ \
+  float16x8_t __s0_372 = __p0_372; \
+  float16x8_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_372; \
+  __ret_372 = __noswap_splatq_laneq_f16(__rev0_372, __p1_372); \
+  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_372; \
 })
 #endif
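The big-endian branches throughout this hunk all share the same reverse, __noswap_ splat, reverse shape. A sketch of that expansion for vdupq_laneq_s16(v, 3), written with the same clang __builtin_shufflevector extension the header itself uses (assumes int16x8_t from <arm_neon.h>; on little-endian the two reversals cancel and only the splat remains):

#include <arm_neon.h>

/* Model of the big-endian expansion: bring the stored lanes into
   architectural order, splat architectural lane 3 to all positions,
   then restore the stored lane order. */
static int16x8_t dup_laneq3_model(int16x8_t v) {
  int16x8_t rev = __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
  int16x8_t dup = __builtin_shufflevector(rev, rev, 3, 3, 3, 3, 3, 3, 3, 3);
  return __builtin_shufflevector(dup, dup, 7, 6, 5, 4, 3, 2, 1, 0);
}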
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s32(__p0_373, __p1_373) __extension__ ({ \
+  int32x4_t __s0_373 = __p0_373; \
+  int32x4_t __ret_373; \
+  __ret_373 = splatq_laneq_s32(__s0_373, __p1_373); \
+  __ret_373; \
 })
 #else
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s32(__p0_374, __p1_374) __extension__ ({ \
+  int32x4_t __s0_374 = __p0_374; \
+  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
+  int32x4_t __ret_374; \
+  __ret_374 = __noswap_splatq_laneq_s32(__rev0_374, __p1_374); \
+  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
+  __ret_374; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s64(__p0_375, __p1_375) __extension__ ({ \
+  int64x2_t __s0_375 = __p0_375; \
+  int64x2_t __ret_375; \
+  __ret_375 = splatq_laneq_s64(__s0_375, __p1_375); \
+  __ret_375; \
 })
 #else
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s64(__p0_376, __p1_376) __extension__ ({ \
+  int64x2_t __s0_376 = __p0_376; \
+  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
+  int64x2_t __ret_376; \
+  __ret_376 = __noswap_splatq_laneq_s64(__rev0_376, __p1_376); \
+  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
+  __ret_376; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdupq_laneq_s16(__p0_377, __p1_377) __extension__ ({ \
+  int16x8_t __s0_377 = __p0_377; \
+  int16x8_t __ret_377; \
+  __ret_377 = splatq_laneq_s16(__s0_377, __p1_377); \
+  __ret_377; \
 })
 #else
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdupq_laneq_s16(__p0_378, __p1_378) __extension__ ({ \
+  int16x8_t __s0_378 = __p0_378; \
+  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_378; \
+  __ret_378 = __noswap_splatq_laneq_s16(__rev0_378, __p1_378); \
+  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_378; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_u8(__p0_379, __p1_379) __extension__ ({ \
+  uint8x16_t __s0_379 = __p0_379; \
+  uint8x8_t __ret_379; \
+  __ret_379 = splat_laneq_u8(__s0_379, __p1_379); \
+  __ret_379; \
 })
 #else
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_u8(__p0_380, __p1_380) __extension__ ({ \
+  uint8x16_t __s0_380 = __p0_380; \
+  uint8x16_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __ret_380; \
+  __ret_380 = __noswap_splat_laneq_u8(__rev0_380, __p1_380); \
+  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_380; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_u32(__p0_381, __p1_381) __extension__ ({ \
+  uint32x4_t __s0_381 = __p0_381; \
+  uint32x2_t __ret_381; \
+  __ret_381 = splat_laneq_u32(__s0_381, __p1_381); \
+  __ret_381; \
 })
 #else
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_laneq_u32(__p0_382, __p1_382) __extension__ ({ \
+  uint32x4_t __s0_382 = __p0_382; \
+  uint32x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
+  uint32x2_t __ret_382; \
+  __ret_382 = __noswap_splat_laneq_u32(__rev0_382, __p1_382); \
+  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 1, 0); \
+  __ret_382; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_u64(__p0_383, __p1_383) __extension__ ({ \
+  uint64x2_t __s0_383 = __p0_383; \
+  uint64x1_t __ret_383; \
+  __ret_383 = splat_laneq_u64(__s0_383, __p1_383); \
+  __ret_383; \
 })
 #else
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_u64(__p0_384, __p1_384) __extension__ ({ \
+  uint64x2_t __s0_384 = __p0_384; \
+  uint64x2_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 1, 0); \
+  uint64x1_t __ret_384; \
+  __ret_384 = __noswap_splat_laneq_u64(__rev0_384, __p1_384); \
+  __ret_384; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_u16(__p0_385, __p1_385) __extension__ ({ \
+  uint16x8_t __s0_385 = __p0_385; \
+  uint16x4_t __ret_385; \
+  __ret_385 = splat_laneq_u16(__s0_385, __p1_385); \
+  __ret_385; \
 })
 #else
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_u16(__p0_386, __p1_386) __extension__ ({ \
+  uint16x8_t __s0_386 = __p0_386; \
+  uint16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_386; \
+  __ret_386 = __noswap_splat_laneq_u16(__rev0_386, __p1_386); \
+  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 3, 2, 1, 0); \
+  __ret_386; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_s8(__p0_387, __p1_387) __extension__ ({ \
+  int8x16_t __s0_387 = __p0_387; \
+  int8x8_t __ret_387; \
+  __ret_387 = splat_laneq_s8(__s0_387, __p1_387); \
+  __ret_387; \
 })
 #else
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_s8(__p0_388, __p1_388) __extension__ ({ \
+  int8x16_t __s0_388 = __p0_388; \
+  int8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x8_t __ret_388; \
+  __ret_388 = __noswap_splat_laneq_s8(__rev0_388, __p1_388); \
+  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_388; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_f64(__p0_389, __p1_389) __extension__ ({ \
+  float64x2_t __s0_389 = __p0_389; \
+  float64x1_t __ret_389; \
+  __ret_389 = splat_laneq_f64(__s0_389, __p1_389); \
+  __ret_389; \
 })
 #else
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_f64(__p0_390, __p1_390) __extension__ ({ \
+  float64x2_t __s0_390 = __p0_390; \
+  float64x2_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 1, 0); \
+  float64x1_t __ret_390; \
+  __ret_390 = __noswap_splat_laneq_f64(__rev0_390, __p1_390); \
+  __ret_390; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_f32(__p0_391, __p1_391) __extension__ ({ \
+  float32x4_t __s0_391 = __p0_391; \
+  float32x2_t __ret_391; \
+  __ret_391 = splat_laneq_f32(__s0_391, __p1_391); \
+  __ret_391; \
 })
 #else
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_laneq_f32(__p0_392, __p1_392) __extension__ ({ \
+  float32x4_t __s0_392 = __p0_392; \
+  float32x4_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 3, 2, 1, 0); \
+  float32x2_t __ret_392; \
+  __ret_392 = __noswap_splat_laneq_f32(__rev0_392, __p1_392); \
+  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
+  __ret_392; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_f16(__p0_393, __p1_393) __extension__ ({ \
+  float16x8_t __s0_393 = __p0_393; \
+  float16x4_t __ret_393; \
+  __ret_393 = splat_laneq_f16(__s0_393, __p1_393); \
+  __ret_393; \
 })
 #else
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_f16(__p0_394, __p1_394) __extension__ ({ \
+  float16x8_t __s0_394 = __p0_394; \
+  float16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __ret_394; \
+  __ret_394 = __noswap_splat_laneq_f16(__rev0_394, __p1_394); \
+  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 3, 2, 1, 0); \
+  __ret_394; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_s32(__p0_395, __p1_395) __extension__ ({ \
+  int32x4_t __s0_395 = __p0_395; \
+  int32x2_t __ret_395; \
+  __ret_395 = splat_laneq_s32(__s0_395, __p1_395); \
+  __ret_395; \
 })
 #else
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vdup_laneq_s32(__p0_396, __p1_396) __extension__ ({ \
+  int32x4_t __s0_396 = __p0_396; \
+  int32x4_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 2, 1, 0); \
+  int32x2_t __ret_396; \
+  __ret_396 = __noswap_splat_laneq_s32(__rev0_396, __p1_396); \
+  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \
+  __ret_396; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
+#define vdup_laneq_s64(__p0_397, __p1_397) __extension__ ({ \
+  int64x2_t __s0_397 = __p0_397; \
+  int64x1_t __ret_397; \
+  __ret_397 = splat_laneq_s64(__s0_397, __p1_397); \
+  __ret_397; \
 })
 #else
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
+#define vdup_laneq_s64(__p0_398, __p1_398) __extension__ ({ \
+  int64x2_t __s0_398 = __p0_398; \
+  int64x2_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 1, 0); \
+  int64x1_t __ret_398; \
+  __ret_398 = __noswap_splat_laneq_s64(__rev0_398, __p1_398); \
+  __ret_398; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
+#define vdup_laneq_s16(__p0_399, __p1_399) __extension__ ({ \
+  int16x8_t __s0_399 = __p0_399; \
+  int16x4_t __ret_399; \
+  __ret_399 = splat_laneq_s16(__s0_399, __p1_399); \
+  __ret_399; \
 })
 #else
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vdup_laneq_s16(__p0_400, __p1_400) __extension__ ({ \
+  int16x8_t __s0_400 = __p0_400; \
+  int16x8_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_400; \
+  __ret_400 = __noswap_splat_laneq_s16(__rev0_400, __p1_400); \
+  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
+  __ret_400; \
 })
 #endif
 
@@ -46487,246 +50287,246 @@
   __ret = vfma_f64(__p0, -__p1, __p2);
   return __ret;
 }
-#define vfmsd_lane_f64(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
-  float64_t __s0_98 = __p0_98; \
-  float64_t __s1_98 = __p1_98; \
-  float64x1_t __s2_98 = __p2_98; \
-  float64_t __ret_98; \
-  __ret_98 = vfmad_lane_f64(__s0_98, -__s1_98, __s2_98, __p3_98); \
-  __ret_98; \
+#define vfmsd_lane_f64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
+  float64_t __s0_401 = __p0_401; \
+  float64_t __s1_401 = __p1_401; \
+  float64x1_t __s2_401 = __p2_401; \
+  float64_t __ret_401; \
+  __ret_401 = vfmad_lane_f64(__s0_401, -__s1_401, __s2_401, __p3_401); \
+  __ret_401; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
-  float32_t __s0_99 = __p0_99; \
-  float32_t __s1_99 = __p1_99; \
-  float32x2_t __s2_99 = __p2_99; \
-  float32_t __ret_99; \
-  __ret_99 = vfmas_lane_f32(__s0_99, -__s1_99, __s2_99, __p3_99); \
-  __ret_99; \
+#define vfmss_lane_f32(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
+  float32_t __s0_402 = __p0_402; \
+  float32_t __s1_402 = __p1_402; \
+  float32x2_t __s2_402 = __p2_402; \
+  float32_t __ret_402; \
+  __ret_402 = vfmas_lane_f32(__s0_402, -__s1_402, __s2_402, __p3_402); \
+  __ret_402; \
 })
 #else
-#define vfmss_lane_f32(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
-  float32_t __s0_100 = __p0_100; \
-  float32_t __s1_100 = __p1_100; \
-  float32x2_t __s2_100 = __p2_100; \
-  float32x2_t __rev2_100;  __rev2_100 = __builtin_shufflevector(__s2_100, __s2_100, 1, 0); \
-  float32_t __ret_100; \
-  __ret_100 = __noswap_vfmas_lane_f32(__s0_100, -__s1_100, __rev2_100, __p3_100); \
-  __ret_100; \
+#define vfmss_lane_f32(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
+  float32_t __s0_403 = __p0_403; \
+  float32_t __s1_403 = __p1_403; \
+  float32x2_t __s2_403 = __p2_403; \
+  float32x2_t __rev2_403;  __rev2_403 = __builtin_shufflevector(__s2_403, __s2_403, 1, 0); \
+  float32_t __ret_403; \
+  __ret_403 = __noswap_vfmas_lane_f32(__s0_403, -__s1_403, __rev2_403, __p3_403); \
+  __ret_403; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
-  float64x2_t __s0_101 = __p0_101; \
-  float64x2_t __s1_101 = __p1_101; \
-  float64x1_t __s2_101 = __p2_101; \
-  float64x2_t __ret_101; \
-  __ret_101 = vfmaq_lane_f64(__s0_101, -__s1_101, __s2_101, __p3_101); \
-  __ret_101; \
+#define vfmsq_lane_f64(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
+  float64x2_t __s0_404 = __p0_404; \
+  float64x2_t __s1_404 = __p1_404; \
+  float64x1_t __s2_404 = __p2_404; \
+  float64x2_t __ret_404; \
+  __ret_404 = vfmaq_lane_f64(__s0_404, -__s1_404, __s2_404, __p3_404); \
+  __ret_404; \
 })
 #else
-#define vfmsq_lane_f64(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
-  float64x2_t __s0_102 = __p0_102; \
-  float64x2_t __s1_102 = __p1_102; \
-  float64x1_t __s2_102 = __p2_102; \
-  float64x2_t __rev0_102;  __rev0_102 = __builtin_shufflevector(__s0_102, __s0_102, 1, 0); \
-  float64x2_t __rev1_102;  __rev1_102 = __builtin_shufflevector(__s1_102, __s1_102, 1, 0); \
-  float64x2_t __ret_102; \
-  __ret_102 = __noswap_vfmaq_lane_f64(__rev0_102, -__rev1_102, __s2_102, __p3_102); \
-  __ret_102 = __builtin_shufflevector(__ret_102, __ret_102, 1, 0); \
-  __ret_102; \
+#define vfmsq_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
+  float64x2_t __s0_405 = __p0_405; \
+  float64x2_t __s1_405 = __p1_405; \
+  float64x1_t __s2_405 = __p2_405; \
+  float64x2_t __rev0_405;  __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, 1, 0); \
+  float64x2_t __rev1_405;  __rev1_405 = __builtin_shufflevector(__s1_405, __s1_405, 1, 0); \
+  float64x2_t __ret_405; \
+  __ret_405 = __noswap_vfmaq_lane_f64(__rev0_405, -__rev1_405, __s2_405, __p3_405); \
+  __ret_405 = __builtin_shufflevector(__ret_405, __ret_405, 1, 0); \
+  __ret_405; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
-  float32x4_t __s0_103 = __p0_103; \
-  float32x4_t __s1_103 = __p1_103; \
-  float32x2_t __s2_103 = __p2_103; \
-  float32x4_t __ret_103; \
-  __ret_103 = vfmaq_lane_f32(__s0_103, -__s1_103, __s2_103, __p3_103); \
-  __ret_103; \
+#define vfmsq_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
+  float32x4_t __s0_406 = __p0_406; \
+  float32x4_t __s1_406 = __p1_406; \
+  float32x2_t __s2_406 = __p2_406; \
+  float32x4_t __ret_406; \
+  __ret_406 = vfmaq_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \
+  __ret_406; \
 })
 #else
-#define vfmsq_lane_f32(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
-  float32x4_t __s0_104 = __p0_104; \
-  float32x4_t __s1_104 = __p1_104; \
-  float32x2_t __s2_104 = __p2_104; \
-  float32x4_t __rev0_104;  __rev0_104 = __builtin_shufflevector(__s0_104, __s0_104, 3, 2, 1, 0); \
-  float32x4_t __rev1_104;  __rev1_104 = __builtin_shufflevector(__s1_104, __s1_104, 3, 2, 1, 0); \
-  float32x2_t __rev2_104;  __rev2_104 = __builtin_shufflevector(__s2_104, __s2_104, 1, 0); \
-  float32x4_t __ret_104; \
-  __ret_104 = __noswap_vfmaq_lane_f32(__rev0_104, -__rev1_104, __rev2_104, __p3_104); \
-  __ret_104 = __builtin_shufflevector(__ret_104, __ret_104, 3, 2, 1, 0); \
-  __ret_104; \
+#define vfmsq_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
+  float32x4_t __s0_407 = __p0_407; \
+  float32x4_t __s1_407 = __p1_407; \
+  float32x2_t __s2_407 = __p2_407; \
+  float32x4_t __rev0_407;  __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, 3, 2, 1, 0); \
+  float32x4_t __rev1_407;  __rev1_407 = __builtin_shufflevector(__s1_407, __s1_407, 3, 2, 1, 0); \
+  float32x2_t __rev2_407;  __rev2_407 = __builtin_shufflevector(__s2_407, __s2_407, 1, 0); \
+  float32x4_t __ret_407; \
+  __ret_407 = __noswap_vfmaq_lane_f32(__rev0_407, -__rev1_407, __rev2_407, __p3_407); \
+  __ret_407 = __builtin_shufflevector(__ret_407, __ret_407, 3, 2, 1, 0); \
+  __ret_407; \
 })
 #endif
 
-#define vfms_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
-  float64x1_t __s0_105 = __p0_105; \
-  float64x1_t __s1_105 = __p1_105; \
-  float64x1_t __s2_105 = __p2_105; \
-  float64x1_t __ret_105; \
-  __ret_105 = vfma_lane_f64(__s0_105, -__s1_105, __s2_105, __p3_105); \
-  __ret_105; \
+#define vfms_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
+  float64x1_t __s0_408 = __p0_408; \
+  float64x1_t __s1_408 = __p1_408; \
+  float64x1_t __s2_408 = __p2_408; \
+  float64x1_t __ret_408; \
+  __ret_408 = vfma_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \
+  __ret_408; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
-  float32x2_t __s0_106 = __p0_106; \
-  float32x2_t __s1_106 = __p1_106; \
-  float32x2_t __s2_106 = __p2_106; \
-  float32x2_t __ret_106; \
-  __ret_106 = vfma_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
-  __ret_106; \
+#define vfms_lane_f32(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
+  float32x2_t __s0_409 = __p0_409; \
+  float32x2_t __s1_409 = __p1_409; \
+  float32x2_t __s2_409 = __p2_409; \
+  float32x2_t __ret_409; \
+  __ret_409 = vfma_lane_f32(__s0_409, -__s1_409, __s2_409, __p3_409); \
+  __ret_409; \
 })
 #else
-#define vfms_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
-  float32x2_t __s0_107 = __p0_107; \
-  float32x2_t __s1_107 = __p1_107; \
-  float32x2_t __s2_107 = __p2_107; \
-  float32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  float32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  float32x2_t __rev2_107;  __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
-  float32x2_t __ret_107; \
-  __ret_107 = __noswap_vfma_lane_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
+#define vfms_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
+  float32x2_t __s0_410 = __p0_410; \
+  float32x2_t __s1_410 = __p1_410; \
+  float32x2_t __s2_410 = __p2_410; \
+  float32x2_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 1, 0); \
+  float32x2_t __rev1_410;  __rev1_410 = __builtin_shufflevector(__s1_410, __s1_410, 1, 0); \
+  float32x2_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 1, 0); \
+  float32x2_t __ret_410; \
+  __ret_410 = __noswap_vfma_lane_f32(__rev0_410, -__rev1_410, __rev2_410, __p3_410); \
+  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 1, 0); \
+  __ret_410; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
-  float64_t __s0_108 = __p0_108; \
-  float64_t __s1_108 = __p1_108; \
-  float64x2_t __s2_108 = __p2_108; \
-  float64_t __ret_108; \
-  __ret_108 = vfmad_laneq_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
-  __ret_108; \
+#define vfmsd_laneq_f64(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
+  float64_t __s0_411 = __p0_411; \
+  float64_t __s1_411 = __p1_411; \
+  float64x2_t __s2_411 = __p2_411; \
+  float64_t __ret_411; \
+  __ret_411 = vfmad_laneq_f64(__s0_411, -__s1_411, __s2_411, __p3_411); \
+  __ret_411; \
 })
 #else
-#define vfmsd_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
-  float64_t __s0_109 = __p0_109; \
-  float64_t __s1_109 = __p1_109; \
-  float64x2_t __s2_109 = __p2_109; \
-  float64x2_t __rev2_109;  __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \
-  float64_t __ret_109; \
-  __ret_109 = __noswap_vfmad_laneq_f64(__s0_109, -__s1_109, __rev2_109, __p3_109); \
-  __ret_109; \
+#define vfmsd_laneq_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
+  float64_t __s0_412 = __p0_412; \
+  float64_t __s1_412 = __p1_412; \
+  float64x2_t __s2_412 = __p2_412; \
+  float64x2_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 1, 0); \
+  float64_t __ret_412; \
+  __ret_412 = __noswap_vfmad_laneq_f64(__s0_412, -__s1_412, __rev2_412, __p3_412); \
+  __ret_412; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  float32_t __s0_110 = __p0_110; \
-  float32_t __s1_110 = __p1_110; \
-  float32x4_t __s2_110 = __p2_110; \
-  float32_t __ret_110; \
-  __ret_110 = vfmas_laneq_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
-  __ret_110; \
+#define vfmss_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
+  float32_t __s0_413 = __p0_413; \
+  float32_t __s1_413 = __p1_413; \
+  float32x4_t __s2_413 = __p2_413; \
+  float32_t __ret_413; \
+  __ret_413 = vfmas_laneq_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \
+  __ret_413; \
 })
 #else
-#define vfmss_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  float32_t __s0_111 = __p0_111; \
-  float32_t __s1_111 = __p1_111; \
-  float32x4_t __s2_111 = __p2_111; \
-  float32x4_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \
-  float32_t __ret_111; \
-  __ret_111 = __noswap_vfmas_laneq_f32(__s0_111, -__s1_111, __rev2_111, __p3_111); \
-  __ret_111; \
+#define vfmss_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
+  float32_t __s0_414 = __p0_414; \
+  float32_t __s1_414 = __p1_414; \
+  float32x4_t __s2_414 = __p2_414; \
+  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
+  float32_t __ret_414; \
+  __ret_414 = __noswap_vfmas_laneq_f32(__s0_414, -__s1_414, __rev2_414, __p3_414); \
+  __ret_414; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  float64x2_t __s0_112 = __p0_112; \
-  float64x2_t __s1_112 = __p1_112; \
-  float64x2_t __s2_112 = __p2_112; \
-  float64x2_t __ret_112; \
-  __ret_112 = vfmaq_laneq_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
-  __ret_112; \
+#define vfmsq_laneq_f64(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
+  float64x2_t __s0_415 = __p0_415; \
+  float64x2_t __s1_415 = __p1_415; \
+  float64x2_t __s2_415 = __p2_415; \
+  float64x2_t __ret_415; \
+  __ret_415 = vfmaq_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \
+  __ret_415; \
 })
 #else
-#define vfmsq_laneq_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  float64x2_t __s0_113 = __p0_113; \
-  float64x2_t __s1_113 = __p1_113; \
-  float64x2_t __s2_113 = __p2_113; \
-  float64x2_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 1, 0); \
-  float64x2_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 1, 0); \
-  float64x2_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 1, 0); \
-  float64x2_t __ret_113; \
-  __ret_113 = __noswap_vfmaq_laneq_f64(__rev0_113, -__rev1_113, __rev2_113, __p3_113); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 1, 0); \
-  __ret_113; \
+#define vfmsq_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
+  float64x2_t __s0_416 = __p0_416; \
+  float64x2_t __s1_416 = __p1_416; \
+  float64x2_t __s2_416 = __p2_416; \
+  float64x2_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 1, 0); \
+  float64x2_t __rev1_416;  __rev1_416 = __builtin_shufflevector(__s1_416, __s1_416, 1, 0); \
+  float64x2_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \
+  float64x2_t __ret_416; \
+  __ret_416 = __noswap_vfmaq_laneq_f64(__rev0_416, -__rev1_416, __rev2_416, __p3_416); \
+  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 1, 0); \
+  __ret_416; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  float32x4_t __s0_114 = __p0_114; \
-  float32x4_t __s1_114 = __p1_114; \
-  float32x4_t __s2_114 = __p2_114; \
-  float32x4_t __ret_114; \
-  __ret_114 = vfmaq_laneq_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
-  __ret_114; \
+#define vfmsq_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
+  float32x4_t __s0_417 = __p0_417; \
+  float32x4_t __s1_417 = __p1_417; \
+  float32x4_t __s2_417 = __p2_417; \
+  float32x4_t __ret_417; \
+  __ret_417 = vfmaq_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \
+  __ret_417; \
 })
 #else
-#define vfmsq_laneq_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  float32x4_t __s0_115 = __p0_115; \
-  float32x4_t __s1_115 = __p1_115; \
-  float32x4_t __s2_115 = __p2_115; \
-  float32x4_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 3, 2, 1, 0); \
-  float32x4_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 3, 2, 1, 0); \
-  float32x4_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 3, 2, 1, 0); \
-  float32x4_t __ret_115; \
-  __ret_115 = __noswap_vfmaq_laneq_f32(__rev0_115, -__rev1_115, __rev2_115, __p3_115); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 3, 2, 1, 0); \
-  __ret_115; \
+#define vfmsq_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
+  float32x4_t __s0_418 = __p0_418; \
+  float32x4_t __s1_418 = __p1_418; \
+  float32x4_t __s2_418 = __p2_418; \
+  float32x4_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 3, 2, 1, 0); \
+  float32x4_t __rev1_418;  __rev1_418 = __builtin_shufflevector(__s1_418, __s1_418, 3, 2, 1, 0); \
+  float32x4_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \
+  float32x4_t __ret_418; \
+  __ret_418 = __noswap_vfmaq_laneq_f32(__rev0_418, -__rev1_418, __rev2_418, __p3_418); \
+  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 3, 2, 1, 0); \
+  __ret_418; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  float64x1_t __s0_116 = __p0_116; \
-  float64x1_t __s1_116 = __p1_116; \
-  float64x2_t __s2_116 = __p2_116; \
-  float64x1_t __ret_116; \
-  __ret_116 = vfma_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
-  __ret_116; \
+#define vfms_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
+  float64x1_t __s0_419 = __p0_419; \
+  float64x1_t __s1_419 = __p1_419; \
+  float64x2_t __s2_419 = __p2_419; \
+  float64x1_t __ret_419; \
+  __ret_419 = vfma_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \
+  __ret_419; \
 })
 #else
-#define vfms_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  float64x1_t __s0_117 = __p0_117; \
-  float64x1_t __s1_117 = __p1_117; \
-  float64x2_t __s2_117 = __p2_117; \
-  float64x2_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
-  float64x1_t __ret_117; \
-  __ret_117 = __noswap_vfma_laneq_f64(__s0_117, -__s1_117, __rev2_117, __p3_117); \
-  __ret_117; \
+#define vfms_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
+  float64x1_t __s0_420 = __p0_420; \
+  float64x1_t __s1_420 = __p1_420; \
+  float64x2_t __s2_420 = __p2_420; \
+  float64x2_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \
+  float64x1_t __ret_420; \
+  __ret_420 = __noswap_vfma_laneq_f64(__s0_420, -__s1_420, __rev2_420, __p3_420); \
+  __ret_420; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
-  float32x2_t __s0_118 = __p0_118; \
-  float32x2_t __s1_118 = __p1_118; \
-  float32x4_t __s2_118 = __p2_118; \
-  float32x2_t __ret_118; \
-  __ret_118 = vfma_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
-  __ret_118; \
+#define vfms_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
+  float32x2_t __s0_421 = __p0_421; \
+  float32x2_t __s1_421 = __p1_421; \
+  float32x4_t __s2_421 = __p2_421; \
+  float32x2_t __ret_421; \
+  __ret_421 = vfma_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \
+  __ret_421; \
 })
 #else
-#define vfms_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
-  float32x2_t __s0_119 = __p0_119; \
-  float32x2_t __s1_119 = __p1_119; \
-  float32x4_t __s2_119 = __p2_119; \
-  float32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  float32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  float32x4_t __rev2_119;  __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
-  float32x2_t __ret_119; \
-  __ret_119 = __noswap_vfma_laneq_f32(__rev0_119, -__rev1_119, __rev2_119, __p3_119); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
+#define vfms_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
+  float32x2_t __s0_422 = __p0_422; \
+  float32x2_t __s1_422 = __p1_422; \
+  float32x4_t __s2_422 = __p2_422; \
+  float32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
+  float32x2_t __rev1_422;  __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 1, 0); \
+  float32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
+  float32x2_t __ret_422; \
+  __ret_422 = __noswap_vfma_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \
+  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
+  __ret_422; \
 })
 #endif
 
@@ -48748,242 +52548,242 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_u32(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
+  uint32x4_t __s0_423 = __p0_423; \
+  uint32x4_t __s1_423 = __p1_423; \
+  uint32x4_t __s2_423 = __p2_423; \
+  uint32x4_t __ret_423; \
+  __ret_423 = __s0_423 + __s1_423 * splatq_laneq_u32(__s2_423, __p3_423); \
+  __ret_423; \
 })
 #else
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_u32(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
+  uint32x4_t __s0_424 = __p0_424; \
+  uint32x4_t __s1_424 = __p1_424; \
+  uint32x4_t __s2_424 = __p2_424; \
+  uint32x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
+  uint32x4_t __rev1_424;  __rev1_424 = __builtin_shufflevector(__s1_424, __s1_424, 3, 2, 1, 0); \
+  uint32x4_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 3, 2, 1, 0); \
+  uint32x4_t __ret_424; \
+  __ret_424 = __rev0_424 + __rev1_424 * __noswap_splatq_laneq_u32(__rev2_424, __p3_424); \
+  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
+  __ret_424; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_u16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
+  uint16x8_t __s0_425 = __p0_425; \
+  uint16x8_t __s1_425 = __p1_425; \
+  uint16x8_t __s2_425 = __p2_425; \
+  uint16x8_t __ret_425; \
+  __ret_425 = __s0_425 + __s1_425 * splatq_laneq_u16(__s2_425, __p3_425); \
+  __ret_425; \
 })
 #else
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_u16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
+  uint16x8_t __s0_426 = __p0_426; \
+  uint16x8_t __s1_426 = __p1_426; \
+  uint16x8_t __s2_426 = __p2_426; \
+  uint16x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_426;  __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_426; \
+  __ret_426 = __rev0_426 + __rev1_426 * __noswap_splatq_laneq_u16(__rev2_426, __p3_426); \
+  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_426; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_f32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
+  float32x4_t __s0_427 = __p0_427; \
+  float32x4_t __s1_427 = __p1_427; \
+  float32x4_t __s2_427 = __p2_427; \
+  float32x4_t __ret_427; \
+  __ret_427 = __s0_427 + __s1_427 * splatq_laneq_f32(__s2_427, __p3_427); \
+  __ret_427; \
 })
 #else
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_f32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
+  float32x4_t __s0_428 = __p0_428; \
+  float32x4_t __s1_428 = __p1_428; \
+  float32x4_t __s2_428 = __p2_428; \
+  float32x4_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \
+  float32x4_t __rev1_428;  __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \
+  float32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
+  float32x4_t __ret_428; \
+  __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_f32(__rev2_428, __p3_428); \
+  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \
+  __ret_428; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_s32(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
+  int32x4_t __s0_429 = __p0_429; \
+  int32x4_t __s1_429 = __p1_429; \
+  int32x4_t __s2_429 = __p2_429; \
+  int32x4_t __ret_429; \
+  __ret_429 = __s0_429 + __s1_429 * splatq_laneq_s32(__s2_429, __p3_429); \
+  __ret_429; \
 })
 #else
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_s32(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
+  int32x4_t __s0_430 = __p0_430; \
+  int32x4_t __s1_430 = __p1_430; \
+  int32x4_t __s2_430 = __p2_430; \
+  int32x4_t __rev0_430;  __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 3, 2, 1, 0); \
+  int32x4_t __rev1_430;  __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 3, 2, 1, 0); \
+  int32x4_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 3, 2, 1, 0); \
+  int32x4_t __ret_430; \
+  __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_s32(__rev2_430, __p3_430); \
+  __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 3, 2, 1, 0); \
+  __ret_430; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlaq_laneq_s16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
+  int16x8_t __s0_431 = __p0_431; \
+  int16x8_t __s1_431 = __p1_431; \
+  int16x8_t __s2_431 = __p2_431; \
+  int16x8_t __ret_431; \
+  __ret_431 = __s0_431 + __s1_431 * splatq_laneq_s16(__s2_431, __p3_431); \
+  __ret_431; \
 })
 #else
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlaq_laneq_s16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
+  int16x8_t __s0_432 = __p0_432; \
+  int16x8_t __s1_432 = __p1_432; \
+  int16x8_t __s2_432 = __p2_432; \
+  int16x8_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_432;  __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_432; \
+  __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_s16(__rev2_432, __p3_432); \
+  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_432; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_u32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
+  uint32x2_t __s0_433 = __p0_433; \
+  uint32x2_t __s1_433 = __p1_433; \
+  uint32x4_t __s2_433 = __p2_433; \
+  uint32x2_t __ret_433; \
+  __ret_433 = __s0_433 + __s1_433 * splat_laneq_u32(__s2_433, __p3_433); \
+  __ret_433; \
 })
 #else
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_laneq_u32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
+  uint32x2_t __s0_434 = __p0_434; \
+  uint32x2_t __s1_434 = __p1_434; \
+  uint32x4_t __s2_434 = __p2_434; \
+  uint32x2_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 1, 0); \
+  uint32x2_t __rev1_434;  __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 1, 0); \
+  uint32x4_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \
+  uint32x2_t __ret_434; \
+  __ret_434 = __rev0_434 + __rev1_434 * __noswap_splat_laneq_u32(__rev2_434, __p3_434); \
+  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 1, 0); \
+  __ret_434; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_u16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
+  uint16x4_t __s0_435 = __p0_435; \
+  uint16x4_t __s1_435 = __p1_435; \
+  uint16x8_t __s2_435 = __p2_435; \
+  uint16x4_t __ret_435; \
+  __ret_435 = __s0_435 + __s1_435 * splat_laneq_u16(__s2_435, __p3_435); \
+  __ret_435; \
 })
 #else
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_laneq_u16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
+  uint16x4_t __s0_436 = __p0_436; \
+  uint16x4_t __s1_436 = __p1_436; \
+  uint16x8_t __s2_436 = __p2_436; \
+  uint16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
+  uint16x4_t __rev1_436;  __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 3, 2, 1, 0); \
+  uint16x8_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_436; \
+  __ret_436 = __rev0_436 + __rev1_436 * __noswap_splat_laneq_u16(__rev2_436, __p3_436); \
+  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
+  __ret_436; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_f32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
+  float32x2_t __s0_437 = __p0_437; \
+  float32x2_t __s1_437 = __p1_437; \
+  float32x4_t __s2_437 = __p2_437; \
+  float32x2_t __ret_437; \
+  __ret_437 = __s0_437 + __s1_437 * splat_laneq_f32(__s2_437, __p3_437); \
+  __ret_437; \
 })
 #else
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_laneq_f32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
+  float32x2_t __s0_438 = __p0_438; \
+  float32x2_t __s1_438 = __p1_438; \
+  float32x4_t __s2_438 = __p2_438; \
+  float32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
+  float32x2_t __rev1_438;  __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \
+  float32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
+  float32x2_t __ret_438; \
+  __ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_f32(__rev2_438, __p3_438); \
+  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
+  __ret_438; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_s32(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
+  int32x2_t __s0_439 = __p0_439; \
+  int32x2_t __s1_439 = __p1_439; \
+  int32x4_t __s2_439 = __p2_439; \
+  int32x2_t __ret_439; \
+  __ret_439 = __s0_439 + __s1_439 * splat_laneq_s32(__s2_439, __p3_439); \
+  __ret_439; \
 })
 #else
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmla_laneq_s32(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
+  int32x2_t __s0_440 = __p0_440; \
+  int32x2_t __s1_440 = __p1_440; \
+  int32x4_t __s2_440 = __p2_440; \
+  int32x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
+  int32x2_t __rev1_440;  __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 1, 0); \
+  int32x4_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 3, 2, 1, 0); \
+  int32x2_t __ret_440; \
+  __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_s32(__rev2_440, __p3_440); \
+  __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 1, 0); \
+  __ret_440; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmla_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
+  int16x4_t __s0_441 = __p0_441; \
+  int16x4_t __s1_441 = __p1_441; \
+  int16x8_t __s2_441 = __p2_441; \
+  int16x4_t __ret_441; \
+  __ret_441 = __s0_441 + __s1_441 * splat_laneq_s16(__s2_441, __p3_441); \
+  __ret_441; \
 })
 #else
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmla_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
+  int16x4_t __s0_442 = __p0_442; \
+  int16x4_t __s1_442 = __p1_442; \
+  int16x8_t __s2_442 = __p2_442; \
+  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
+  int16x4_t __rev1_442;  __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 3, 2, 1, 0); \
+  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_442; \
+  __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_s16(__rev2_442, __p3_442); \
+  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
+  __ret_442; \
 })
 #endif
 
@@ -49005,290 +52805,290 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_u32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \
+  uint64x2_t __s0_443 = __p0_443; \
+  uint32x4_t __s1_443 = __p1_443; \
+  uint32x2_t __s2_443 = __p2_443; \
+  uint64x2_t __ret_443; \
+  __ret_443 = __s0_443 + vmull_u32(vget_high_u32(__s1_443), splat_lane_u32(__s2_443, __p3_443)); \
+  __ret_443; \
 })
 #else
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_u32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \
+  uint64x2_t __s0_444 = __p0_444; \
+  uint32x4_t __s1_444 = __p1_444; \
+  uint32x2_t __s2_444 = __p2_444; \
+  uint64x2_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \
+  uint32x4_t __rev1_444;  __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 3, 2, 1, 0); \
+  uint32x2_t __rev2_444;  __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 1, 0); \
+  uint64x2_t __ret_444; \
+  __ret_444 = __rev0_444 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_444), __noswap_splat_lane_u32(__rev2_444, __p3_444)); \
+  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \
+  __ret_444; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_u16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \
+  uint32x4_t __s0_445 = __p0_445; \
+  uint16x8_t __s1_445 = __p1_445; \
+  uint16x4_t __s2_445 = __p2_445; \
+  uint32x4_t __ret_445; \
+  __ret_445 = __s0_445 + vmull_u16(vget_high_u16(__s1_445), splat_lane_u16(__s2_445, __p3_445)); \
+  __ret_445; \
 })
 #else
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_u16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \
+  uint32x4_t __s0_446 = __p0_446; \
+  uint16x8_t __s1_446 = __p1_446; \
+  uint16x4_t __s2_446 = __p2_446; \
+  uint32x4_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \
+  uint16x8_t __rev1_446;  __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_446;  __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 3, 2, 1, 0); \
+  uint32x4_t __ret_446; \
+  __ret_446 = __rev0_446 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_446), __noswap_splat_lane_u16(__rev2_446, __p3_446)); \
+  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \
+  __ret_446; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_s32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \
+  int64x2_t __s0_447 = __p0_447; \
+  int32x4_t __s1_447 = __p1_447; \
+  int32x2_t __s2_447 = __p2_447; \
+  int64x2_t __ret_447; \
+  __ret_447 = __s0_447 + vmull_s32(vget_high_s32(__s1_447), splat_lane_s32(__s2_447, __p3_447)); \
+  __ret_447; \
 })
 #else
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_s32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \
+  int64x2_t __s0_448 = __p0_448; \
+  int32x4_t __s1_448 = __p1_448; \
+  int32x2_t __s2_448 = __p2_448; \
+  int64x2_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \
+  int32x4_t __rev1_448;  __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \
+  int32x2_t __rev2_448;  __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \
+  int64x2_t __ret_448; \
+  __ret_448 = __rev0_448 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_448), __noswap_splat_lane_s32(__rev2_448, __p3_448)); \
+  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \
+  __ret_448; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_lane_s16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \
+  int32x4_t __s0_449 = __p0_449; \
+  int16x8_t __s1_449 = __p1_449; \
+  int16x4_t __s2_449 = __p2_449; \
+  int32x4_t __ret_449; \
+  __ret_449 = __s0_449 + vmull_s16(vget_high_s16(__s1_449), splat_lane_s16(__s2_449, __p3_449)); \
+  __ret_449; \
 })
 #else
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_lane_s16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \
+  int32x4_t __s0_450 = __p0_450; \
+  int16x8_t __s1_450 = __p1_450; \
+  int16x4_t __s2_450 = __p2_450; \
+  int32x4_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \
+  int16x8_t __rev1_450;  __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_450;  __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \
+  int32x4_t __ret_450; \
+  __ret_450 = __rev0_450 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_450), __noswap_splat_lane_s16(__rev2_450, __p3_450)); \
+  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \
+  __ret_450; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_u32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \
+  uint64x2_t __s0_451 = __p0_451; \
+  uint32x4_t __s1_451 = __p1_451; \
+  uint32x4_t __s2_451 = __p2_451; \
+  uint64x2_t __ret_451; \
+  __ret_451 = __s0_451 + vmull_u32(vget_high_u32(__s1_451), splat_laneq_u32(__s2_451, __p3_451)); \
+  __ret_451; \
 })
 #else
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_u32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \
+  uint64x2_t __s0_452 = __p0_452; \
+  uint32x4_t __s1_452 = __p1_452; \
+  uint32x4_t __s2_452 = __p2_452; \
+  uint64x2_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \
+  uint32x4_t __rev1_452;  __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \
+  uint32x4_t __rev2_452;  __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 3, 2, 1, 0); \
+  uint64x2_t __ret_452; \
+  __ret_452 = __rev0_452 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_452), __noswap_splat_laneq_u32(__rev2_452, __p3_452)); \
+  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \
+  __ret_452; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_u16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \
+  uint32x4_t __s0_453 = __p0_453; \
+  uint16x8_t __s1_453 = __p1_453; \
+  uint16x8_t __s2_453 = __p2_453; \
+  uint32x4_t __ret_453; \
+  __ret_453 = __s0_453 + vmull_u16(vget_high_u16(__s1_453), splat_laneq_u16(__s2_453, __p3_453)); \
+  __ret_453; \
 })
 #else
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_u16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \
+  uint32x4_t __s0_454 = __p0_454; \
+  uint16x8_t __s1_454 = __p1_454; \
+  uint16x8_t __s2_454 = __p2_454; \
+  uint32x4_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \
+  uint16x8_t __rev1_454;  __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_454;  __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_454; \
+  __ret_454 = __rev0_454 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_454), __noswap_splat_laneq_u16(__rev2_454, __p3_454)); \
+  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \
+  __ret_454; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_s32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \
+  int64x2_t __s0_455 = __p0_455; \
+  int32x4_t __s1_455 = __p1_455; \
+  int32x4_t __s2_455 = __p2_455; \
+  int64x2_t __ret_455; \
+  __ret_455 = __s0_455 + vmull_s32(vget_high_s32(__s1_455), splat_laneq_s32(__s2_455, __p3_455)); \
+  __ret_455; \
 })
 #else
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_s32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \
+  int64x2_t __s0_456 = __p0_456; \
+  int32x4_t __s1_456 = __p1_456; \
+  int32x4_t __s2_456 = __p2_456; \
+  int64x2_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \
+  int32x4_t __rev1_456;  __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \
+  int32x4_t __rev2_456;  __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \
+  int64x2_t __ret_456; \
+  __ret_456 = __rev0_456 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_456), __noswap_splat_laneq_s32(__rev2_456, __p3_456)); \
+  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \
+  __ret_456; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_high_laneq_s16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \
+  int32x4_t __s0_457 = __p0_457; \
+  int16x8_t __s1_457 = __p1_457; \
+  int16x8_t __s2_457 = __p2_457; \
+  int32x4_t __ret_457; \
+  __ret_457 = __s0_457 + vmull_s16(vget_high_s16(__s1_457), splat_laneq_s16(__s2_457, __p3_457)); \
+  __ret_457; \
 })
 #else
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_high_laneq_s16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \
+  int32x4_t __s0_458 = __p0_458; \
+  int16x8_t __s1_458 = __p1_458; \
+  int16x8_t __s2_458 = __p2_458; \
+  int32x4_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \
+  int16x8_t __rev1_458;  __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_458;  __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_458; \
+  __ret_458 = __rev0_458 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_458), __noswap_splat_laneq_s16(__rev2_458, __p3_458)); \
+  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \
+  __ret_458; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_u32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \
+  uint64x2_t __s0_459 = __p0_459; \
+  uint32x2_t __s1_459 = __p1_459; \
+  uint32x4_t __s2_459 = __p2_459; \
+  uint64x2_t __ret_459; \
+  __ret_459 = __s0_459 + vmull_u32(__s1_459, splat_laneq_u32(__s2_459, __p3_459)); \
+  __ret_459; \
 })
 #else
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_laneq_u32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \
+  uint64x2_t __s0_460 = __p0_460; \
+  uint32x2_t __s1_460 = __p1_460; \
+  uint32x4_t __s2_460 = __p2_460; \
+  uint64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
+  uint32x2_t __rev1_460;  __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 1, 0); \
+  uint32x4_t __rev2_460;  __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \
+  uint64x2_t __ret_460; \
+  __ret_460 = __rev0_460 + __noswap_vmull_u32(__rev1_460, __noswap_splat_laneq_u32(__rev2_460, __p3_460)); \
+  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
+  __ret_460; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_u16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \
+  uint32x4_t __s0_461 = __p0_461; \
+  uint16x4_t __s1_461 = __p1_461; \
+  uint16x8_t __s2_461 = __p2_461; \
+  uint32x4_t __ret_461; \
+  __ret_461 = __s0_461 + vmull_u16(__s1_461, splat_laneq_u16(__s2_461, __p3_461)); \
+  __ret_461; \
 })
 #else
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_laneq_u16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \
+  uint32x4_t __s0_462 = __p0_462; \
+  uint16x4_t __s1_462 = __p1_462; \
+  uint16x8_t __s2_462 = __p2_462; \
+  uint32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
+  uint16x4_t __rev1_462;  __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 3, 2, 1, 0); \
+  uint16x8_t __rev2_462;  __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_462; \
+  __ret_462 = __rev0_462 + __noswap_vmull_u16(__rev1_462, __noswap_splat_laneq_u16(__rev2_462, __p3_462)); \
+  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
+  __ret_462; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_s32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \
+  int64x2_t __s0_463 = __p0_463; \
+  int32x2_t __s1_463 = __p1_463; \
+  int32x4_t __s2_463 = __p2_463; \
+  int64x2_t __ret_463; \
+  __ret_463 = __s0_463 + vmull_s32(__s1_463, splat_laneq_s32(__s2_463, __p3_463)); \
+  __ret_463; \
 })
 #else
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_laneq_s32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \
+  int64x2_t __s0_464 = __p0_464; \
+  int32x2_t __s1_464 = __p1_464; \
+  int32x4_t __s2_464 = __p2_464; \
+  int64x2_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \
+  int32x2_t __rev1_464;  __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \
+  int32x4_t __rev2_464;  __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \
+  int64x2_t __ret_464; \
+  __ret_464 = __rev0_464 + __noswap_vmull_s32(__rev1_464, __noswap_splat_laneq_s32(__rev2_464, __p3_464)); \
+  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \
+  __ret_464; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_laneq_s16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \
+  int32x4_t __s0_465 = __p0_465; \
+  int16x4_t __s1_465 = __p1_465; \
+  int16x8_t __s2_465 = __p2_465; \
+  int32x4_t __ret_465; \
+  __ret_465 = __s0_465 + vmull_s16(__s1_465, splat_laneq_s16(__s2_465, __p3_465)); \
+  __ret_465; \
 })
 #else
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_laneq_s16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \
+  int32x4_t __s0_466 = __p0_466; \
+  int16x4_t __s1_466 = __p1_466; \
+  int16x8_t __s2_466 = __p2_466; \
+  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
+  int16x4_t __rev1_466;  __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \
+  int16x8_t __rev2_466;  __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_466; \
+  __ret_466 = __rev0_466 + __noswap_vmull_s16(__rev1_466, __noswap_splat_laneq_s16(__rev2_466, __p3_466)); \
+  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
+  __ret_466; \
 })
 #endif
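
The other change visible in every hunk is that macro-local temporaries gain unique numeric suffixes (`__s0` becomes `__s0_447`, `__s0_448`, and so on, one counter value per expansion). A plausible motivation, stated here as an assumption: the `splat_lane_*` helpers are themselves statement-expression macros that declare temporaries with the same base names, and nesting two expansions that share a name can self-initialize the inner temporary. A reduced illustration in plain C:

/* Two statement-expression macros sharing the temporary name __t.
 * BAD_SPLAT(__t) expands to ({ int __t = (__t); __t; }), where the
 * initializer names the *inner*, still-uninitialized __t.          */
#define BAD_SPLAT(v)  __extension__ ({ int __t = (v); __t; })
#define BAD_OUTER(v)  __extension__ ({ int __t = (v); BAD_SPLAT(__t); })

/* Per-expansion suffixes, as in the regenerated header, keep the
 * locals distinct, so the inner initializer sees the outer value.  */
#define GOOD_SPLAT(v) __extension__ ({ int __t_2 = (v); __t_2; })
#define GOOD_OUTER(v) __extension__ ({ int __t_1 = (v); GOOD_SPLAT(__t_1); })

int demo_bad(void)  { return BAD_OUTER(42);  } /* garbage: self-init */
int demo_good(void) { return GOOD_OUTER(42); } /* returns 42         */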
 
@@ -49316,242 +53116,242 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_u32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \
+  uint32x4_t __s0_467 = __p0_467; \
+  uint32x4_t __s1_467 = __p1_467; \
+  uint32x4_t __s2_467 = __p2_467; \
+  uint32x4_t __ret_467; \
+  __ret_467 = __s0_467 - __s1_467 * splatq_laneq_u32(__s2_467, __p3_467); \
+  __ret_467; \
 })
 #else
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_u32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \
+  uint32x4_t __s0_468 = __p0_468; \
+  uint32x4_t __s1_468 = __p1_468; \
+  uint32x4_t __s2_468 = __p2_468; \
+  uint32x4_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \
+  uint32x4_t __rev1_468;  __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 3, 2, 1, 0); \
+  uint32x4_t __rev2_468;  __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \
+  uint32x4_t __ret_468; \
+  __ret_468 = __rev0_468 - __rev1_468 * __noswap_splatq_laneq_u32(__rev2_468, __p3_468); \
+  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \
+  __ret_468; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_u16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \
+  uint16x8_t __s0_469 = __p0_469; \
+  uint16x8_t __s1_469 = __p1_469; \
+  uint16x8_t __s2_469 = __p2_469; \
+  uint16x8_t __ret_469; \
+  __ret_469 = __s0_469 - __s1_469 * splatq_laneq_u16(__s2_469, __p3_469); \
+  __ret_469; \
 })
 #else
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_u16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \
+  uint16x8_t __s0_470 = __p0_470; \
+  uint16x8_t __s1_470 = __p1_470; \
+  uint16x8_t __s2_470 = __p2_470; \
+  uint16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_470;  __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_470;  __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_470; \
+  __ret_470 = __rev0_470 - __rev1_470 * __noswap_splatq_laneq_u16(__rev2_470, __p3_470); \
+  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_470; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_f32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \
+  float32x4_t __s0_471 = __p0_471; \
+  float32x4_t __s1_471 = __p1_471; \
+  float32x4_t __s2_471 = __p2_471; \
+  float32x4_t __ret_471; \
+  __ret_471 = __s0_471 - __s1_471 * splatq_laneq_f32(__s2_471, __p3_471); \
+  __ret_471; \
 })
 #else
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_f32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \
+  float32x4_t __s0_472 = __p0_472; \
+  float32x4_t __s1_472 = __p1_472; \
+  float32x4_t __s2_472 = __p2_472; \
+  float32x4_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \
+  float32x4_t __rev1_472;  __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \
+  float32x4_t __rev2_472;  __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \
+  float32x4_t __ret_472; \
+  __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_f32(__rev2_472, __p3_472); \
+  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \
+  __ret_472; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_s32(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \
+  int32x4_t __s0_473 = __p0_473; \
+  int32x4_t __s1_473 = __p1_473; \
+  int32x4_t __s2_473 = __p2_473; \
+  int32x4_t __ret_473; \
+  __ret_473 = __s0_473 - __s1_473 * splatq_laneq_s32(__s2_473, __p3_473); \
+  __ret_473; \
 })
 #else
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_s32(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \
+  int32x4_t __s0_474 = __p0_474; \
+  int32x4_t __s1_474 = __p1_474; \
+  int32x4_t __s2_474 = __p2_474; \
+  int32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
+  int32x4_t __rev1_474;  __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 3, 2, 1, 0); \
+  int32x4_t __rev2_474;  __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 3, 2, 1, 0); \
+  int32x4_t __ret_474; \
+  __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_s32(__rev2_474, __p3_474); \
+  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 3, 2, 1, 0); \
+  __ret_474; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmlsq_laneq_s16(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \
+  int16x8_t __s0_475 = __p0_475; \
+  int16x8_t __s1_475 = __p1_475; \
+  int16x8_t __s2_475 = __p2_475; \
+  int16x8_t __ret_475; \
+  __ret_475 = __s0_475 - __s1_475 * splatq_laneq_s16(__s2_475, __p3_475); \
+  __ret_475; \
 })
 #else
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsq_laneq_s16(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \
+  int16x8_t __s0_476 = __p0_476; \
+  int16x8_t __s1_476 = __p1_476; \
+  int16x8_t __s2_476 = __p2_476; \
+  int16x8_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_476;  __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_476;  __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_476; \
+  __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_s16(__rev2_476, __p3_476); \
+  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_476; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_u32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \
+  uint32x2_t __s0_477 = __p0_477; \
+  uint32x2_t __s1_477 = __p1_477; \
+  uint32x4_t __s2_477 = __p2_477; \
+  uint32x2_t __ret_477; \
+  __ret_477 = __s0_477 - __s1_477 * splat_laneq_u32(__s2_477, __p3_477); \
+  __ret_477; \
 })
 #else
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_laneq_u32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \
+  uint32x2_t __s0_478 = __p0_478; \
+  uint32x2_t __s1_478 = __p1_478; \
+  uint32x4_t __s2_478 = __p2_478; \
+  uint32x2_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 1, 0); \
+  uint32x2_t __rev1_478;  __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 1, 0); \
+  uint32x4_t __rev2_478;  __rev2_478 = __builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \
+  uint32x2_t __ret_478; \
+  __ret_478 = __rev0_478 - __rev1_478 * __noswap_splat_laneq_u32(__rev2_478, __p3_478); \
+  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 1, 0); \
+  __ret_478; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_u16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \
+  uint16x4_t __s0_479 = __p0_479; \
+  uint16x4_t __s1_479 = __p1_479; \
+  uint16x8_t __s2_479 = __p2_479; \
+  uint16x4_t __ret_479; \
+  __ret_479 = __s0_479 - __s1_479 * splat_laneq_u16(__s2_479, __p3_479); \
+  __ret_479; \
 })
 #else
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_laneq_u16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \
+  uint16x4_t __s0_480 = __p0_480; \
+  uint16x4_t __s1_480 = __p1_480; \
+  uint16x8_t __s2_480 = __p2_480; \
+  uint16x4_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 3, 2, 1, 0); \
+  uint16x4_t __rev1_480;  __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 3, 2, 1, 0); \
+  uint16x8_t __rev2_480;  __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_480; \
+  __ret_480 = __rev0_480 - __rev1_480 * __noswap_splat_laneq_u16(__rev2_480, __p3_480); \
+  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 3, 2, 1, 0); \
+  __ret_480; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_f32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \
+  float32x2_t __s0_481 = __p0_481; \
+  float32x2_t __s1_481 = __p1_481; \
+  float32x4_t __s2_481 = __p2_481; \
+  float32x2_t __ret_481; \
+  __ret_481 = __s0_481 - __s1_481 * splat_laneq_f32(__s2_481, __p3_481); \
+  __ret_481; \
 })
 #else
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_laneq_f32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \
+  float32x2_t __s0_482 = __p0_482; \
+  float32x2_t __s1_482 = __p1_482; \
+  float32x4_t __s2_482 = __p2_482; \
+  float32x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
+  float32x2_t __rev1_482;  __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 0); \
+  float32x4_t __rev2_482;  __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \
+  float32x2_t __ret_482; \
+  __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_f32(__rev2_482, __p3_482); \
+  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \
+  __ret_482; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_s32(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \
+  int32x2_t __s0_483 = __p0_483; \
+  int32x2_t __s1_483 = __p1_483; \
+  int32x4_t __s2_483 = __p2_483; \
+  int32x2_t __ret_483; \
+  __ret_483 = __s0_483 - __s1_483 * splat_laneq_s32(__s2_483, __p3_483); \
+  __ret_483; \
 })
 #else
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmls_laneq_s32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \
+  int32x2_t __s0_484 = __p0_484; \
+  int32x2_t __s1_484 = __p1_484; \
+  int32x4_t __s2_484 = __p2_484; \
+  int32x2_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \
+  int32x2_t __rev1_484;  __rev1_484 = __builtin_shufflevector(__s1_484, __s1_484, 1, 0); \
+  int32x4_t __rev2_484;  __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 3, 2, 1, 0); \
+  int32x2_t __ret_484; \
+  __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_s32(__rev2_484, __p3_484); \
+  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
+  __ret_484; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
+#define vmls_laneq_s16(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \
+  int16x4_t __s0_485 = __p0_485; \
+  int16x4_t __s1_485 = __p1_485; \
+  int16x8_t __s2_485 = __p2_485; \
+  int16x4_t __ret_485; \
+  __ret_485 = __s0_485 - __s1_485 * splat_laneq_s16(__s2_485, __p3_485); \
+  __ret_485; \
 })
 #else
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmls_laneq_s16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \
+  int16x4_t __s0_486 = __p0_486; \
+  int16x4_t __s1_486 = __p1_486; \
+  int16x8_t __s2_486 = __p2_486; \
+  int16x4_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 3, 2, 1, 0); \
+  int16x4_t __rev1_486;  __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 3, 2, 1, 0); \
+  int16x8_t __rev2_486;  __rev2_486 = __builtin_shufflevector(__s2_486, __s2_486, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_486; \
+  __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_s16(__rev2_486, __p3_486); \
+  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
+  __ret_486; \
 })
 #endif
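
Each `#else` (big-endian) arm above follows the header's usual reverse/compute/reverse discipline: the lanes of every input vector are reversed into the element order the builtins expect, the arithmetic goes through `__noswap_*` variants (which perform the raw operation without the lane swaps the public intrinsics would apply), and the result's lanes are reversed back. A reduced sketch of that shape, with hypothetical names and the lane index fixed at 0:

#include <arm_neon.h>

/* Reverse the two lanes of an int32x2_t. */
#define my_rev_s32x2(v) __builtin_shufflevector(v, v, 1, 0)

/* Shape of a big-endian vmls_lane-style arm: reverse the inputs,
 * compute as if little-endian, then reverse the result back.     */
int32x2_t my_vmls_lane_be_sketch(int32x2_t acc, int32x2_t a, int32x2_t b) {
  int32x2_t r0 = my_rev_s32x2(acc);
  int32x2_t r1 = my_rev_s32x2(a);
  int32x2_t r2 = my_rev_s32x2(b);
  int32x2_t r  = r0 - r1 * __builtin_shufflevector(r2, r2, 0, 0);
  return my_rev_s32x2(r);
}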
 
@@ -49573,290 +53373,290 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_u32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \
+  uint64x2_t __s0_487 = __p0_487; \
+  uint32x4_t __s1_487 = __p1_487; \
+  uint32x2_t __s2_487 = __p2_487; \
+  uint64x2_t __ret_487; \
+  __ret_487 = __s0_487 - vmull_u32(vget_high_u32(__s1_487), splat_lane_u32(__s2_487, __p3_487)); \
+  __ret_487; \
 })
 #else
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_u32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \
+  uint64x2_t __s0_488 = __p0_488; \
+  uint32x4_t __s1_488 = __p1_488; \
+  uint32x2_t __s2_488 = __p2_488; \
+  uint64x2_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \
+  uint32x4_t __rev1_488;  __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 3, 2, 1, 0); \
+  uint32x2_t __rev2_488;  __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 1, 0); \
+  uint64x2_t __ret_488; \
+  __ret_488 = __rev0_488 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_488), __noswap_splat_lane_u32(__rev2_488, __p3_488)); \
+  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
+  __ret_488; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_u16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \
+  uint32x4_t __s0_489 = __p0_489; \
+  uint16x8_t __s1_489 = __p1_489; \
+  uint16x4_t __s2_489 = __p2_489; \
+  uint32x4_t __ret_489; \
+  __ret_489 = __s0_489 - vmull_u16(vget_high_u16(__s1_489), splat_lane_u16(__s2_489, __p3_489)); \
+  __ret_489; \
 })
 #else
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_u16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \
+  uint32x4_t __s0_490 = __p0_490; \
+  uint16x8_t __s1_490 = __p1_490; \
+  uint16x4_t __s2_490 = __p2_490; \
+  uint32x4_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \
+  uint16x8_t __rev1_490;  __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev2_490;  __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 3, 2, 1, 0); \
+  uint32x4_t __ret_490; \
+  __ret_490 = __rev0_490 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_490), __noswap_splat_lane_u16(__rev2_490, __p3_490)); \
+  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \
+  __ret_490; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_s32(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \
+  int64x2_t __s0_491 = __p0_491; \
+  int32x4_t __s1_491 = __p1_491; \
+  int32x2_t __s2_491 = __p2_491; \
+  int64x2_t __ret_491; \
+  __ret_491 = __s0_491 - vmull_s32(vget_high_s32(__s1_491), splat_lane_s32(__s2_491, __p3_491)); \
+  __ret_491; \
 })
 #else
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_s32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \
+  int64x2_t __s0_492 = __p0_492; \
+  int32x4_t __s1_492 = __p1_492; \
+  int32x2_t __s2_492 = __p2_492; \
+  int64x2_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \
+  int32x4_t __rev1_492;  __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \
+  int32x2_t __rev2_492;  __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \
+  int64x2_t __ret_492; \
+  __ret_492 = __rev0_492 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_492), __noswap_splat_lane_s32(__rev2_492, __p3_492)); \
+  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \
+  __ret_492; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_lane_s16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
+  int32x4_t __s0_493 = __p0_493; \
+  int16x8_t __s1_493 = __p1_493; \
+  int16x4_t __s2_493 = __p2_493; \
+  int32x4_t __ret_493; \
+  __ret_493 = __s0_493 - vmull_s16(vget_high_s16(__s1_493), splat_lane_s16(__s2_493, __p3_493)); \
+  __ret_493; \
 })
 #else
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_lane_s16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
+  int32x4_t __s0_494 = __p0_494; \
+  int16x8_t __s1_494 = __p1_494; \
+  int16x4_t __s2_494 = __p2_494; \
+  int32x4_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \
+  int16x8_t __rev1_494;  __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_494;  __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \
+  int32x4_t __ret_494; \
+  __ret_494 = __rev0_494 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_494), __noswap_splat_lane_s16(__rev2_494, __p3_494)); \
+  __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \
+  __ret_494; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_u32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
+  uint64x2_t __s0_495 = __p0_495; \
+  uint32x4_t __s1_495 = __p1_495; \
+  uint32x4_t __s2_495 = __p2_495; \
+  uint64x2_t __ret_495; \
+  __ret_495 = __s0_495 - vmull_u32(vget_high_u32(__s1_495), splat_laneq_u32(__s2_495, __p3_495)); \
+  __ret_495; \
 })
 #else
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_u32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
+  uint64x2_t __s0_496 = __p0_496; \
+  uint32x4_t __s1_496 = __p1_496; \
+  uint32x4_t __s2_496 = __p2_496; \
+  uint64x2_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \
+  uint32x4_t __rev1_496;  __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \
+  uint32x4_t __rev2_496;  __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 3, 2, 1, 0); \
+  uint64x2_t __ret_496; \
+  __ret_496 = __rev0_496 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_496), __noswap_splat_laneq_u32(__rev2_496, __p3_496)); \
+  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \
+  __ret_496; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_u16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
+  uint32x4_t __s0_497 = __p0_497; \
+  uint16x8_t __s1_497 = __p1_497; \
+  uint16x8_t __s2_497 = __p2_497; \
+  uint32x4_t __ret_497; \
+  __ret_497 = __s0_497 - vmull_u16(vget_high_u16(__s1_497), splat_laneq_u16(__s2_497, __p3_497)); \
+  __ret_497; \
 })
 #else
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_u16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
+  uint32x4_t __s0_498 = __p0_498; \
+  uint16x8_t __s1_498 = __p1_498; \
+  uint16x8_t __s2_498 = __p2_498; \
+  uint32x4_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \
+  uint16x8_t __rev1_498;  __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev2_498;  __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_498; \
+  __ret_498 = __rev0_498 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_498), __noswap_splat_laneq_u16(__rev2_498, __p3_498)); \
+  __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \
+  __ret_498; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_s32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
+  int64x2_t __s0_499 = __p0_499; \
+  int32x4_t __s1_499 = __p1_499; \
+  int32x4_t __s2_499 = __p2_499; \
+  int64x2_t __ret_499; \
+  __ret_499 = __s0_499 - vmull_s32(vget_high_s32(__s1_499), splat_laneq_s32(__s2_499, __p3_499)); \
+  __ret_499; \
 })
 #else
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_s32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
+  int64x2_t __s0_500 = __p0_500; \
+  int32x4_t __s1_500 = __p1_500; \
+  int32x4_t __s2_500 = __p2_500; \
+  int64x2_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \
+  int32x4_t __rev1_500;  __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \
+  int32x4_t __rev2_500;  __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \
+  int64x2_t __ret_500; \
+  __ret_500 = __rev0_500 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_500), __noswap_splat_laneq_s32(__rev2_500, __p3_500)); \
+  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
+  __ret_500; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_high_laneq_s16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
+  int32x4_t __s0_501 = __p0_501; \
+  int16x8_t __s1_501 = __p1_501; \
+  int16x8_t __s2_501 = __p2_501; \
+  int32x4_t __ret_501; \
+  __ret_501 = __s0_501 - vmull_s16(vget_high_s16(__s1_501), splat_laneq_s16(__s2_501, __p3_501)); \
+  __ret_501; \
 })
 #else
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_high_laneq_s16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
+  int32x4_t __s0_502 = __p0_502; \
+  int16x8_t __s1_502 = __p1_502; \
+  int16x8_t __s2_502 = __p2_502; \
+  int32x4_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \
+  int16x8_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_502; \
+  __ret_502 = __rev0_502 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_502), __noswap_splat_laneq_s16(__rev2_502, __p3_502)); \
+  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \
+  __ret_502; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_u32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
+  uint64x2_t __s0_503 = __p0_503; \
+  uint32x2_t __s1_503 = __p1_503; \
+  uint32x4_t __s2_503 = __p2_503; \
+  uint64x2_t __ret_503; \
+  __ret_503 = __s0_503 - vmull_u32(__s1_503, splat_laneq_u32(__s2_503, __p3_503)); \
+  __ret_503; \
 })
 #else
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_u32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
+  uint64x2_t __s0_504 = __p0_504; \
+  uint32x2_t __s1_504 = __p1_504; \
+  uint32x4_t __s2_504 = __p2_504; \
+  uint64x2_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \
+  uint32x2_t __rev1_504;  __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 1, 0); \
+  uint32x4_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \
+  uint64x2_t __ret_504; \
+  __ret_504 = __rev0_504 - __noswap_vmull_u32(__rev1_504, __noswap_splat_laneq_u32(__rev2_504, __p3_504)); \
+  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \
+  __ret_504; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_u16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
+  uint32x4_t __s0_505 = __p0_505; \
+  uint16x4_t __s1_505 = __p1_505; \
+  uint16x8_t __s2_505 = __p2_505; \
+  uint32x4_t __ret_505; \
+  __ret_505 = __s0_505 - vmull_u16(__s1_505, splat_laneq_u16(__s2_505, __p3_505)); \
+  __ret_505; \
 })
 #else
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_u16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
+  uint32x4_t __s0_506 = __p0_506; \
+  uint16x4_t __s1_506 = __p1_506; \
+  uint16x8_t __s2_506 = __p2_506; \
+  uint32x4_t __rev0_506;  __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \
+  uint16x4_t __rev1_506;  __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 3, 2, 1, 0); \
+  uint16x8_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_506; \
+  __ret_506 = __rev0_506 - __noswap_vmull_u16(__rev1_506, __noswap_splat_laneq_u16(__rev2_506, __p3_506)); \
+  __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \
+  __ret_506; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_s32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
+  int64x2_t __s0_507 = __p0_507; \
+  int32x2_t __s1_507 = __p1_507; \
+  int32x4_t __s2_507 = __p2_507; \
+  int64x2_t __ret_507; \
+  __ret_507 = __s0_507 - vmull_s32(__s1_507, splat_laneq_s32(__s2_507, __p3_507)); \
+  __ret_507; \
 })
 #else
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_s32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
+  int64x2_t __s0_508 = __p0_508; \
+  int32x2_t __s1_508 = __p1_508; \
+  int32x4_t __s2_508 = __p2_508; \
+  int64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
+  int32x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
+  int32x4_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \
+  int64x2_t __ret_508; \
+  __ret_508 = __rev0_508 - __noswap_vmull_s32(__rev1_508, __noswap_splat_laneq_s32(__rev2_508, __p3_508)); \
+  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
+  __ret_508; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_laneq_s16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
+  int32x4_t __s0_509 = __p0_509; \
+  int16x4_t __s1_509 = __p1_509; \
+  int16x8_t __s2_509 = __p2_509; \
+  int32x4_t __ret_509; \
+  __ret_509 = __s0_509 - vmull_s16(__s1_509, splat_laneq_s16(__s2_509, __p3_509)); \
+  __ret_509; \
 })
 #else
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_laneq_s16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
+  int32x4_t __s0_510 = __p0_510; \
+  int16x4_t __s1_510 = __p1_510; \
+  int16x8_t __s2_510 = __p2_510; \
+  int32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
+  int16x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
+  int16x8_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_510; \
+  __ret_510 = __rev0_510 - __noswap_vmull_s16(__rev1_510, __noswap_splat_laneq_s16(__rev2_510, __p3_510)); \
+  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
+  __ret_510; \
 })
 #endif
 
@@ -49901,146 +53701,146 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_120) {
-  uint16x8_t __ret_120;
-  uint8x8_t __a1_120 = vget_high_u8(__p0_120);
-  __ret_120 = (uint16x8_t)(vshll_n_u8(__a1_120, 0));
-  return __ret_120;
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_511) {
+  uint16x8_t __ret_511;
+  uint8x8_t __a1_511 = vget_high_u8(__p0_511);
+  __ret_511 = (uint16x8_t)(vshll_n_u8(__a1_511, 0));
+  return __ret_511;
 }
 #else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_121) {
-  uint8x16_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__p0_121, __p0_121, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_121;
-  uint8x8_t __a1_121 = __noswap_vget_high_u8(__rev0_121);
-  __ret_121 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_121, 0));
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_121;
+__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_512) {
+  uint8x16_t __rev0_512;  __rev0_512 = __builtin_shufflevector(__p0_512, __p0_512, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint16x8_t __ret_512;
+  uint8x8_t __a1_512 = __noswap_vget_high_u8(__rev0_512);
+  __ret_512 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_512, 0));
+  __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_512;
 }
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_122) {
-  uint16x8_t __ret_122;
-  uint8x8_t __a1_122 = __noswap_vget_high_u8(__p0_122);
-  __ret_122 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_122, 0));
-  return __ret_122;
+__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_513) {
+  uint16x8_t __ret_513;
+  uint8x8_t __a1_513 = __noswap_vget_high_u8(__p0_513);
+  __ret_513 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_513, 0));
+  return __ret_513;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_123) {
-  uint64x2_t __ret_123;
-  uint32x2_t __a1_123 = vget_high_u32(__p0_123);
-  __ret_123 = (uint64x2_t)(vshll_n_u32(__a1_123, 0));
-  return __ret_123;
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_514) {
+  uint64x2_t __ret_514;
+  uint32x2_t __a1_514 = vget_high_u32(__p0_514);
+  __ret_514 = (uint64x2_t)(vshll_n_u32(__a1_514, 0));
+  return __ret_514;
 }
 #else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_124) {
-  uint32x4_t __rev0_124;  __rev0_124 = __builtin_shufflevector(__p0_124, __p0_124, 3, 2, 1, 0);
-  uint64x2_t __ret_124;
-  uint32x2_t __a1_124 = __noswap_vget_high_u32(__rev0_124);
-  __ret_124 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_124, 0));
-  __ret_124 = __builtin_shufflevector(__ret_124, __ret_124, 1, 0);
-  return __ret_124;
+__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_515) {
+  uint32x4_t __rev0_515;  __rev0_515 = __builtin_shufflevector(__p0_515, __p0_515, 3, 2, 1, 0);
+  uint64x2_t __ret_515;
+  uint32x2_t __a1_515 = __noswap_vget_high_u32(__rev0_515);
+  __ret_515 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_515, 0));
+  __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 1, 0);
+  return __ret_515;
 }
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_125) {
-  uint64x2_t __ret_125;
-  uint32x2_t __a1_125 = __noswap_vget_high_u32(__p0_125);
-  __ret_125 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_125, 0));
-  return __ret_125;
+__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_516) {
+  uint64x2_t __ret_516;
+  uint32x2_t __a1_516 = __noswap_vget_high_u32(__p0_516);
+  __ret_516 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_516, 0));
+  return __ret_516;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_126) {
-  uint32x4_t __ret_126;
-  uint16x4_t __a1_126 = vget_high_u16(__p0_126);
-  __ret_126 = (uint32x4_t)(vshll_n_u16(__a1_126, 0));
-  return __ret_126;
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_517) {
+  uint32x4_t __ret_517;
+  uint16x4_t __a1_517 = vget_high_u16(__p0_517);
+  __ret_517 = (uint32x4_t)(vshll_n_u16(__a1_517, 0));
+  return __ret_517;
 }
 #else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_127) {
-  uint16x8_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__p0_127, __p0_127, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_127;
-  uint16x4_t __a1_127 = __noswap_vget_high_u16(__rev0_127);
-  __ret_127 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_127, 0));
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0);
-  return __ret_127;
+__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_518) {
+  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__p0_518, __p0_518, 7, 6, 5, 4, 3, 2, 1, 0);
+  uint32x4_t __ret_518;
+  uint16x4_t __a1_518 = __noswap_vget_high_u16(__rev0_518);
+  __ret_518 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_518, 0));
+  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 3, 2, 1, 0);
+  return __ret_518;
 }
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_128) {
-  uint32x4_t __ret_128;
-  uint16x4_t __a1_128 = __noswap_vget_high_u16(__p0_128);
-  __ret_128 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_128, 0));
-  return __ret_128;
+__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_519) {
+  uint32x4_t __ret_519;
+  uint16x4_t __a1_519 = __noswap_vget_high_u16(__p0_519);
+  __ret_519 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_519, 0));
+  return __ret_519;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_129) {
-  int16x8_t __ret_129;
-  int8x8_t __a1_129 = vget_high_s8(__p0_129);
-  __ret_129 = (int16x8_t)(vshll_n_s8(__a1_129, 0));
-  return __ret_129;
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_520) {
+  int16x8_t __ret_520;
+  int8x8_t __a1_520 = vget_high_s8(__p0_520);
+  __ret_520 = (int16x8_t)(vshll_n_s8(__a1_520, 0));
+  return __ret_520;
 }
 #else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_130) {
-  int8x16_t __rev0_130;  __rev0_130 = __builtin_shufflevector(__p0_130, __p0_130, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_130;
-  int8x8_t __a1_130 = __noswap_vget_high_s8(__rev0_130);
-  __ret_130 = (int16x8_t)(__noswap_vshll_n_s8(__a1_130, 0));
-  __ret_130 = __builtin_shufflevector(__ret_130, __ret_130, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_130;
+__ai int16x8_t vmovl_high_s8(int8x16_t __p0_521) {
+  int8x16_t __rev0_521;  __rev0_521 = __builtin_shufflevector(__p0_521, __p0_521, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+  int16x8_t __ret_521;
+  int8x8_t __a1_521 = __noswap_vget_high_s8(__rev0_521);
+  __ret_521 = (int16x8_t)(__noswap_vshll_n_s8(__a1_521, 0));
+  __ret_521 = __builtin_shufflevector(__ret_521, __ret_521, 7, 6, 5, 4, 3, 2, 1, 0);
+  return __ret_521;
 }
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_131) {
-  int16x8_t __ret_131;
-  int8x8_t __a1_131 = __noswap_vget_high_s8(__p0_131);
-  __ret_131 = (int16x8_t)(__noswap_vshll_n_s8(__a1_131, 0));
-  return __ret_131;
+__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_522) {
+  int16x8_t __ret_522;
+  int8x8_t __a1_522 = __noswap_vget_high_s8(__p0_522);
+  __ret_522 = (int16x8_t)(__noswap_vshll_n_s8(__a1_522, 0));
+  return __ret_522;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_132) {
-  int64x2_t __ret_132;
-  int32x2_t __a1_132 = vget_high_s32(__p0_132);
-  __ret_132 = (int64x2_t)(vshll_n_s32(__a1_132, 0));
-  return __ret_132;
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_523) {
+  int64x2_t __ret_523;
+  int32x2_t __a1_523 = vget_high_s32(__p0_523);
+  __ret_523 = (int64x2_t)(vshll_n_s32(__a1_523, 0));
+  return __ret_523;
 }
 #else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_133) {
-  int32x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__p0_133, __p0_133, 3, 2, 1, 0);
-  int64x2_t __ret_133;
-  int32x2_t __a1_133 = __noswap_vget_high_s32(__rev0_133);
-  __ret_133 = (int64x2_t)(__noswap_vshll_n_s32(__a1_133, 0));
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0);
-  return __ret_133;
+__ai int64x2_t vmovl_high_s32(int32x4_t __p0_524) {
+  int32x4_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__p0_524, __p0_524, 3, 2, 1, 0);
+  int64x2_t __ret_524;
+  int32x2_t __a1_524 = __noswap_vget_high_s32(__rev0_524);
+  __ret_524 = (int64x2_t)(__noswap_vshll_n_s32(__a1_524, 0));
+  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0);
+  return __ret_524;
 }
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_134) {
-  int64x2_t __ret_134;
-  int32x2_t __a1_134 = __noswap_vget_high_s32(__p0_134);
-  __ret_134 = (int64x2_t)(__noswap_vshll_n_s32(__a1_134, 0));
-  return __ret_134;
+__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_525) {
+  int64x2_t __ret_525;
+  int32x2_t __a1_525 = __noswap_vget_high_s32(__p0_525);
+  __ret_525 = (int64x2_t)(__noswap_vshll_n_s32(__a1_525, 0));
+  return __ret_525;
 }
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_135) {
-  int32x4_t __ret_135;
-  int16x4_t __a1_135 = vget_high_s16(__p0_135);
-  __ret_135 = (int32x4_t)(vshll_n_s16(__a1_135, 0));
-  return __ret_135;
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_526) {
+  int32x4_t __ret_526;
+  int16x4_t __a1_526 = vget_high_s16(__p0_526);
+  __ret_526 = (int32x4_t)(vshll_n_s16(__a1_526, 0));
+  return __ret_526;
 }
 #else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_136) {
-  int16x8_t __rev0_136;  __rev0_136 = __builtin_shufflevector(__p0_136, __p0_136, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_136;
-  int16x4_t __a1_136 = __noswap_vget_high_s16(__rev0_136);
-  __ret_136 = (int32x4_t)(__noswap_vshll_n_s16(__a1_136, 0));
-  __ret_136 = __builtin_shufflevector(__ret_136, __ret_136, 3, 2, 1, 0);
-  return __ret_136;
+__ai int32x4_t vmovl_high_s16(int16x8_t __p0_527) {
+  int16x8_t __rev0_527;  __rev0_527 = __builtin_shufflevector(__p0_527, __p0_527, 7, 6, 5, 4, 3, 2, 1, 0);
+  int32x4_t __ret_527;
+  int16x4_t __a1_527 = __noswap_vget_high_s16(__rev0_527);
+  __ret_527 = (int32x4_t)(__noswap_vshll_n_s16(__a1_527, 0));
+  __ret_527 = __builtin_shufflevector(__ret_527, __ret_527, 3, 2, 1, 0);
+  return __ret_527;
 }
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_137) {
-  int32x4_t __ret_137;
-  int16x4_t __a1_137 = __noswap_vget_high_s16(__p0_137);
-  __ret_137 = (int32x4_t)(__noswap_vshll_n_s16(__a1_137, 0));
-  return __ret_137;
+__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_528) {
+  int32x4_t __ret_528;
+  int16x4_t __a1_528 = __noswap_vget_high_s16(__p0_528);
+  __ret_528 = (int32x4_t)(__noswap_vshll_n_s16(__a1_528, 0));
+  return __ret_528;
 }
 #endif
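
Each vmovl_high_* above widens the upper half of a 128-bit vector: it takes the high half with vget_high_* and widens it via vshll_n_* with a shift of 0, while the big-endian branch reverses lanes, runs the same computation on __noswap_ helpers, and reverses back. A usage sketch, assuming an AArch64 NEON target:

#include <arm_neon.h>

/* Widen the top eight bytes of a 16-byte vector to eight 16-bit lanes;
 * equivalent to vmovl_u8(vget_high_u8(v)). */
uint16x8_t widen_top(uint8x16_t v) {
  return vmovl_high_u8(v);
}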
 
@@ -50168,29 +53968,29 @@
   __ret = __p0 * __p1;
   return __ret;
 }
-#define vmuld_lane_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  float64_t __s0_138 = __p0_138; \
-  float64x1_t __s1_138 = __p1_138; \
-  float64_t __ret_138; \
-  __ret_138 = __s0_138 * vget_lane_f64(__s1_138, __p2_138); \
-  __ret_138; \
+#define vmuld_lane_f64(__p0_529, __p1_529, __p2_529) __extension__ ({ \
+  float64_t __s0_529 = __p0_529; \
+  float64x1_t __s1_529 = __p1_529; \
+  float64_t __ret_529; \
+  __ret_529 = __s0_529 * vget_lane_f64(__s1_529, __p2_529); \
+  __ret_529; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  float32_t __s0_139 = __p0_139; \
-  float32x2_t __s1_139 = __p1_139; \
-  float32_t __ret_139; \
-  __ret_139 = __s0_139 * vget_lane_f32(__s1_139, __p2_139); \
-  __ret_139; \
+#define vmuls_lane_f32(__p0_530, __p1_530, __p2_530) __extension__ ({ \
+  float32_t __s0_530 = __p0_530; \
+  float32x2_t __s1_530 = __p1_530; \
+  float32_t __ret_530; \
+  __ret_530 = __s0_530 * vget_lane_f32(__s1_530, __p2_530); \
+  __ret_530; \
 })
 #else
-#define vmuls_lane_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  float32_t __s0_140 = __p0_140; \
-  float32x2_t __s1_140 = __p1_140; \
-  float32x2_t __rev1_140;  __rev1_140 = __builtin_shufflevector(__s1_140, __s1_140, 1, 0); \
-  float32_t __ret_140; \
-  __ret_140 = __s0_140 * __noswap_vget_lane_f32(__rev1_140, __p2_140); \
-  __ret_140; \
+#define vmuls_lane_f32(__p0_531, __p1_531, __p2_531) __extension__ ({ \
+  float32_t __s0_531 = __p0_531; \
+  float32x2_t __s1_531 = __p1_531; \
+  float32x2_t __rev1_531;  __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, 1, 0); \
+  float32_t __ret_531; \
+  __ret_531 = __s0_531 * __noswap_vget_lane_f32(__rev1_531, __p2_531); \
+  __ret_531; \
 })
 #endif
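
The scalar-by-lane multiplies (vmuld_lane_f64, vmuls_lane_f32) simply extract the lane with vget_lane_* and multiply as scalars; only the vector operand needs byte-reversal on big-endian, so the scalar result needs no final shuffle. For example, assuming an AArch64 target:

#include <arm_neon.h>

/* Multiply a scalar by lane 1 of a two-lane float vector. */
float32_t scale_by_lane1(float32_t s, float32x2_t v) {
  return vmuls_lane_f32(s, v, 1);
}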
 
@@ -50202,60 +54002,60 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmulq_lane_f64(__p0_532, __p1_532, __p2_532) __extension__ ({ \
+  float64x2_t __s0_532 = __p0_532; \
+  float64x1_t __s1_532 = __p1_532; \
+  float64x2_t __ret_532; \
+  __ret_532 = __s0_532 * splatq_lane_f64(__s1_532, __p2_532); \
+  __ret_532; \
 })
 #else
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulq_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \
+  float64x2_t __s0_533 = __p0_533; \
+  float64x1_t __s1_533 = __p1_533; \
+  float64x2_t __rev0_533;  __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, 1, 0); \
+  float64x2_t __ret_533; \
+  __ret_533 = __rev0_533 * __noswap_splatq_lane_f64(__s1_533, __p2_533); \
+  __ret_533 = __builtin_shufflevector(__ret_533, __ret_533, 1, 0); \
+  __ret_533; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  float64_t __s0_141 = __p0_141; \
-  float64x2_t __s1_141 = __p1_141; \
-  float64_t __ret_141; \
-  __ret_141 = __s0_141 * vgetq_lane_f64(__s1_141, __p2_141); \
-  __ret_141; \
+#define vmuld_laneq_f64(__p0_534, __p1_534, __p2_534) __extension__ ({ \
+  float64_t __s0_534 = __p0_534; \
+  float64x2_t __s1_534 = __p1_534; \
+  float64_t __ret_534; \
+  __ret_534 = __s0_534 * vgetq_lane_f64(__s1_534, __p2_534); \
+  __ret_534; \
 })
 #else
-#define vmuld_laneq_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
-  float64_t __s0_142 = __p0_142; \
-  float64x2_t __s1_142 = __p1_142; \
-  float64x2_t __rev1_142;  __rev1_142 = __builtin_shufflevector(__s1_142, __s1_142, 1, 0); \
-  float64_t __ret_142; \
-  __ret_142 = __s0_142 * __noswap_vgetq_lane_f64(__rev1_142, __p2_142); \
-  __ret_142; \
+#define vmuld_laneq_f64(__p0_535, __p1_535, __p2_535) __extension__ ({ \
+  float64_t __s0_535 = __p0_535; \
+  float64x2_t __s1_535 = __p1_535; \
+  float64x2_t __rev1_535;  __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \
+  float64_t __ret_535; \
+  __ret_535 = __s0_535 * __noswap_vgetq_lane_f64(__rev1_535, __p2_535); \
+  __ret_535; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_143, __p1_143, __p2_143) __extension__ ({ \
-  float32_t __s0_143 = __p0_143; \
-  float32x4_t __s1_143 = __p1_143; \
-  float32_t __ret_143; \
-  __ret_143 = __s0_143 * vgetq_lane_f32(__s1_143, __p2_143); \
-  __ret_143; \
+#define vmuls_laneq_f32(__p0_536, __p1_536, __p2_536) __extension__ ({ \
+  float32_t __s0_536 = __p0_536; \
+  float32x4_t __s1_536 = __p1_536; \
+  float32_t __ret_536; \
+  __ret_536 = __s0_536 * vgetq_lane_f32(__s1_536, __p2_536); \
+  __ret_536; \
 })
 #else
-#define vmuls_laneq_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
-  float32_t __s0_144 = __p0_144; \
-  float32x4_t __s1_144 = __p1_144; \
-  float32x4_t __rev1_144;  __rev1_144 = __builtin_shufflevector(__s1_144, __s1_144, 3, 2, 1, 0); \
-  float32_t __ret_144; \
-  __ret_144 = __s0_144 * __noswap_vgetq_lane_f32(__rev1_144, __p2_144); \
-  __ret_144; \
+#define vmuls_laneq_f32(__p0_537, __p1_537, __p2_537) __extension__ ({ \
+  float32_t __s0_537 = __p0_537; \
+  float32x4_t __s1_537 = __p1_537; \
+  float32x4_t __rev1_537;  __rev1_537 = __builtin_shufflevector(__s1_537, __s1_537, 3, 2, 1, 0); \
+  float32_t __ret_537; \
+  __ret_537 = __s0_537 * __noswap_vgetq_lane_f32(__rev1_537, __p2_537); \
+  __ret_537; \
 })
 #endif
 
@@ -50279,233 +54079,233 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_u32(__p0_538, __p1_538, __p2_538) __extension__ ({ \
+  uint32x4_t __s0_538 = __p0_538; \
+  uint32x4_t __s1_538 = __p1_538; \
+  uint32x4_t __ret_538; \
+  __ret_538 = __s0_538 * splatq_laneq_u32(__s1_538, __p2_538); \
+  __ret_538; \
 })
 #else
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_u32(__p0_539, __p1_539, __p2_539) __extension__ ({ \
+  uint32x4_t __s0_539 = __p0_539; \
+  uint32x4_t __s1_539 = __p1_539; \
+  uint32x4_t __rev0_539;  __rev0_539 = __builtin_shufflevector(__s0_539, __s0_539, 3, 2, 1, 0); \
+  uint32x4_t __rev1_539;  __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 3, 2, 1, 0); \
+  uint32x4_t __ret_539; \
+  __ret_539 = __rev0_539 * __noswap_splatq_laneq_u32(__rev1_539, __p2_539); \
+  __ret_539 = __builtin_shufflevector(__ret_539, __ret_539, 3, 2, 1, 0); \
+  __ret_539; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_u16(__p0_540, __p1_540, __p2_540) __extension__ ({ \
+  uint16x8_t __s0_540 = __p0_540; \
+  uint16x8_t __s1_540 = __p1_540; \
+  uint16x8_t __ret_540; \
+  __ret_540 = __s0_540 * splatq_laneq_u16(__s1_540, __p2_540); \
+  __ret_540; \
 })
 #else
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_u16(__p0_541, __p1_541, __p2_541) __extension__ ({ \
+  uint16x8_t __s0_541 = __p0_541; \
+  uint16x8_t __s1_541 = __p1_541; \
+  uint16x8_t __rev0_541;  __rev0_541 = __builtin_shufflevector(__s0_541, __s0_541, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_541;  __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_541; \
+  __ret_541 = __rev0_541 * __noswap_splatq_laneq_u16(__rev1_541, __p2_541); \
+  __ret_541 = __builtin_shufflevector(__ret_541, __ret_541, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_541; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_f64(__p0_542, __p1_542, __p2_542) __extension__ ({ \
+  float64x2_t __s0_542 = __p0_542; \
+  float64x2_t __s1_542 = __p1_542; \
+  float64x2_t __ret_542; \
+  __ret_542 = __s0_542 * splatq_laneq_f64(__s1_542, __p2_542); \
+  __ret_542; \
 })
 #else
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulq_laneq_f64(__p0_543, __p1_543, __p2_543) __extension__ ({ \
+  float64x2_t __s0_543 = __p0_543; \
+  float64x2_t __s1_543 = __p1_543; \
+  float64x2_t __rev0_543;  __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 1, 0); \
+  float64x2_t __rev1_543;  __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 1, 0); \
+  float64x2_t __ret_543; \
+  __ret_543 = __rev0_543 * __noswap_splatq_laneq_f64(__rev1_543, __p2_543); \
+  __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 1, 0); \
+  __ret_543; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_f32(__p0_544, __p1_544, __p2_544) __extension__ ({ \
+  float32x4_t __s0_544 = __p0_544; \
+  float32x4_t __s1_544 = __p1_544; \
+  float32x4_t __ret_544; \
+  __ret_544 = __s0_544 * splatq_laneq_f32(__s1_544, __p2_544); \
+  __ret_544; \
 })
 #else
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_f32(__p0_545, __p1_545, __p2_545) __extension__ ({ \
+  float32x4_t __s0_545 = __p0_545; \
+  float32x4_t __s1_545 = __p1_545; \
+  float32x4_t __rev0_545;  __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 3, 2, 1, 0); \
+  float32x4_t __rev1_545;  __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 3, 2, 1, 0); \
+  float32x4_t __ret_545; \
+  __ret_545 = __rev0_545 * __noswap_splatq_laneq_f32(__rev1_545, __p2_545); \
+  __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 3, 2, 1, 0); \
+  __ret_545; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_s32(__p0_546, __p1_546, __p2_546) __extension__ ({ \
+  int32x4_t __s0_546 = __p0_546; \
+  int32x4_t __s1_546 = __p1_546; \
+  int32x4_t __ret_546; \
+  __ret_546 = __s0_546 * splatq_laneq_s32(__s1_546, __p2_546); \
+  __ret_546; \
 })
 #else
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_s32(__p0_547, __p1_547, __p2_547) __extension__ ({ \
+  int32x4_t __s0_547 = __p0_547; \
+  int32x4_t __s1_547 = __p1_547; \
+  int32x4_t __rev0_547;  __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 3, 2, 1, 0); \
+  int32x4_t __rev1_547;  __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 3, 2, 1, 0); \
+  int32x4_t __ret_547; \
+  __ret_547 = __rev0_547 * __noswap_splatq_laneq_s32(__rev1_547, __p2_547); \
+  __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 3, 2, 1, 0); \
+  __ret_547; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmulq_laneq_s16(__p0_548, __p1_548, __p2_548) __extension__ ({ \
+  int16x8_t __s0_548 = __p0_548; \
+  int16x8_t __s1_548 = __p1_548; \
+  int16x8_t __ret_548; \
+  __ret_548 = __s0_548 * splatq_laneq_s16(__s1_548, __p2_548); \
+  __ret_548; \
 })
 #else
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
+#define vmulq_laneq_s16(__p0_549, __p1_549, __p2_549) __extension__ ({ \
+  int16x8_t __s0_549 = __p0_549; \
+  int16x8_t __s1_549 = __p1_549; \
+  int16x8_t __rev0_549;  __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_549;  __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_549; \
+  __ret_549 = __rev0_549 * __noswap_splatq_laneq_s16(__rev1_549, __p2_549); \
+  __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_549; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_u32(__p0_550, __p1_550, __p2_550) __extension__ ({ \
+  uint32x2_t __s0_550 = __p0_550; \
+  uint32x4_t __s1_550 = __p1_550; \
+  uint32x2_t __ret_550; \
+  __ret_550 = __s0_550 * splat_laneq_u32(__s1_550, __p2_550); \
+  __ret_550; \
 })
 #else
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_laneq_u32(__p0_551, __p1_551, __p2_551) __extension__ ({ \
+  uint32x2_t __s0_551 = __p0_551; \
+  uint32x4_t __s1_551 = __p1_551; \
+  uint32x2_t __rev0_551;  __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 1, 0); \
+  uint32x4_t __rev1_551;  __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \
+  uint32x2_t __ret_551; \
+  __ret_551 = __rev0_551 * __noswap_splat_laneq_u32(__rev1_551, __p2_551); \
+  __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 1, 0); \
+  __ret_551; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_u16(__p0_552, __p1_552, __p2_552) __extension__ ({ \
+  uint16x4_t __s0_552 = __p0_552; \
+  uint16x8_t __s1_552 = __p1_552; \
+  uint16x4_t __ret_552; \
+  __ret_552 = __s0_552 * splat_laneq_u16(__s1_552, __p2_552); \
+  __ret_552; \
 })
 #else
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_laneq_u16(__p0_553, __p1_553, __p2_553) __extension__ ({ \
+  uint16x4_t __s0_553 = __p0_553; \
+  uint16x8_t __s1_553 = __p1_553; \
+  uint16x4_t __rev0_553;  __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 3, 2, 1, 0); \
+  uint16x8_t __rev1_553;  __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __ret_553; \
+  __ret_553 = __rev0_553 * __noswap_splat_laneq_u16(__rev1_553, __p2_553); \
+  __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 3, 2, 1, 0); \
+  __ret_553; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_f32(__p0_554, __p1_554, __p2_554) __extension__ ({ \
+  float32x2_t __s0_554 = __p0_554; \
+  float32x4_t __s1_554 = __p1_554; \
+  float32x2_t __ret_554; \
+  __ret_554 = __s0_554 * splat_laneq_f32(__s1_554, __p2_554); \
+  __ret_554; \
 })
 #else
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_laneq_f32(__p0_555, __p1_555, __p2_555) __extension__ ({ \
+  float32x2_t __s0_555 = __p0_555; \
+  float32x4_t __s1_555 = __p1_555; \
+  float32x2_t __rev0_555;  __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \
+  float32x4_t __rev1_555;  __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \
+  float32x2_t __ret_555; \
+  __ret_555 = __rev0_555 * __noswap_splat_laneq_f32(__rev1_555, __p2_555); \
+  __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \
+  __ret_555; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_s32(__p0_556, __p1_556, __p2_556) __extension__ ({ \
+  int32x2_t __s0_556 = __p0_556; \
+  int32x4_t __s1_556 = __p1_556; \
+  int32x2_t __ret_556; \
+  __ret_556 = __s0_556 * splat_laneq_s32(__s1_556, __p2_556); \
+  __ret_556; \
 })
 #else
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmul_laneq_s32(__p0_557, __p1_557, __p2_557) __extension__ ({ \
+  int32x2_t __s0_557 = __p0_557; \
+  int32x4_t __s1_557 = __p1_557; \
+  int32x2_t __rev0_557;  __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 1, 0); \
+  int32x4_t __rev1_557;  __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 3, 2, 1, 0); \
+  int32x2_t __ret_557; \
+  __ret_557 = __rev0_557 * __noswap_splat_laneq_s32(__rev1_557, __p2_557); \
+  __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 1, 0); \
+  __ret_557; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
+#define vmul_laneq_s16(__p0_558, __p1_558, __p2_558) __extension__ ({ \
+  int16x4_t __s0_558 = __p0_558; \
+  int16x8_t __s1_558 = __p1_558; \
+  int16x4_t __ret_558; \
+  __ret_558 = __s0_558 * splat_laneq_s16(__s1_558, __p2_558); \
+  __ret_558; \
 })
 #else
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmul_laneq_s16(__p0_559, __p1_559, __p2_559) __extension__ ({ \
+  int16x4_t __s0_559 = __p0_559; \
+  int16x8_t __s1_559 = __p1_559; \
+  int16x4_t __rev0_559;  __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 3, 2, 1, 0); \
+  int16x8_t __rev1_559;  __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __ret_559; \
+  __ret_559 = __rev0_559 * __noswap_splat_laneq_s16(__rev1_559, __p2_559); \
+  __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 3, 2, 1, 0); \
+  __ret_559; \
 })
 #endif
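
All the vmulq_laneq_* / vmul_laneq_* forms above share one shape: broadcast a single lane of the 128-bit second operand with splatq_laneq_* (or splat_laneq_* for 64-bit results) and do an ordinary vector multiply; the big-endian branch wraps the identical computation in lane reversals. A usage sketch, assuming AArch64:

#include <arm_neon.h>

/* Scale every lane of a by lane 2 of b. */
float32x4_t scale_by_lane2(float32x4_t a, float32x4_t b) {
  return vmulq_laneq_f32(a, b, 2);
}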
 
@@ -50671,170 +54471,170 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_u32(__p0_560, __p1_560, __p2_560) __extension__ ({ \
+  uint32x4_t __s0_560 = __p0_560; \
+  uint32x2_t __s1_560 = __p1_560; \
+  uint64x2_t __ret_560; \
+  __ret_560 = vmull_u32(vget_high_u32(__s0_560), splat_lane_u32(__s1_560, __p2_560)); \
+  __ret_560; \
 })
 #else
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_lane_u32(__p0_561, __p1_561, __p2_561) __extension__ ({ \
+  uint32x4_t __s0_561 = __p0_561; \
+  uint32x2_t __s1_561 = __p1_561; \
+  uint32x4_t __rev0_561;  __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 3, 2, 1, 0); \
+  uint32x2_t __rev1_561;  __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 1, 0); \
+  uint64x2_t __ret_561; \
+  __ret_561 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_561), __noswap_splat_lane_u32(__rev1_561, __p2_561)); \
+  __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \
+  __ret_561; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_u16(__p0_562, __p1_562, __p2_562) __extension__ ({ \
+  uint16x8_t __s0_562 = __p0_562; \
+  uint16x4_t __s1_562 = __p1_562; \
+  uint32x4_t __ret_562; \
+  __ret_562 = vmull_u16(vget_high_u16(__s0_562), splat_lane_u16(__s1_562, __p2_562)); \
+  __ret_562; \
 })
 #else
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_lane_u16(__p0_563, __p1_563, __p2_563) __extension__ ({ \
+  uint16x8_t __s0_563 = __p0_563; \
+  uint16x4_t __s1_563 = __p1_563; \
+  uint16x8_t __rev0_563;  __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x4_t __rev1_563;  __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 3, 2, 1, 0); \
+  uint32x4_t __ret_563; \
+  __ret_563 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_563), __noswap_splat_lane_u16(__rev1_563, __p2_563)); \
+  __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \
+  __ret_563; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_s32(__p0_564, __p1_564, __p2_564) __extension__ ({ \
+  int32x4_t __s0_564 = __p0_564; \
+  int32x2_t __s1_564 = __p1_564; \
+  int64x2_t __ret_564; \
+  __ret_564 = vmull_s32(vget_high_s32(__s0_564), splat_lane_s32(__s1_564, __p2_564)); \
+  __ret_564; \
 })
 #else
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_lane_s32(__p0_565, __p1_565, __p2_565) __extension__ ({ \
+  int32x4_t __s0_565 = __p0_565; \
+  int32x2_t __s1_565 = __p1_565; \
+  int32x4_t __rev0_565;  __rev0_565 = __builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \
+  int32x2_t __rev1_565;  __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 0); \
+  int64x2_t __ret_565; \
+  __ret_565 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_565), __noswap_splat_lane_s32(__rev1_565, __p2_565)); \
+  __ret_565 = __builtin_shufflevector(__ret_565, __ret_565, 1, 0); \
+  __ret_565; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_lane_s16(__p0_566, __p1_566, __p2_566) __extension__ ({ \
+  int16x8_t __s0_566 = __p0_566; \
+  int16x4_t __s1_566 = __p1_566; \
+  int32x4_t __ret_566; \
+  __ret_566 = vmull_s16(vget_high_s16(__s0_566), splat_lane_s16(__s1_566, __p2_566)); \
+  __ret_566; \
 })
 #else
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_lane_s16(__p0_567, __p1_567, __p2_567) __extension__ ({ \
+  int16x8_t __s0_567 = __p0_567; \
+  int16x4_t __s1_567 = __p1_567; \
+  int16x8_t __rev0_567;  __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_567;  __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \
+  int32x4_t __ret_567; \
+  __ret_567 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_567), __noswap_splat_lane_s16(__rev1_567, __p2_567)); \
+  __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \
+  __ret_567; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_u32(__p0_568, __p1_568, __p2_568) __extension__ ({ \
+  uint32x4_t __s0_568 = __p0_568; \
+  uint32x4_t __s1_568 = __p1_568; \
+  uint64x2_t __ret_568; \
+  __ret_568 = vmull_u32(vget_high_u32(__s0_568), splat_laneq_u32(__s1_568, __p2_568)); \
+  __ret_568; \
 })
 #else
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_u32(__p0_569, __p1_569, __p2_569) __extension__ ({ \
+  uint32x4_t __s0_569 = __p0_569; \
+  uint32x4_t __s1_569 = __p1_569; \
+  uint32x4_t __rev0_569;  __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \
+  uint32x4_t __rev1_569;  __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 3, 2, 1, 0); \
+  uint64x2_t __ret_569; \
+  __ret_569 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_569), __noswap_splat_laneq_u32(__rev1_569, __p2_569)); \
+  __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \
+  __ret_569; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_u16(__p0_570, __p1_570, __p2_570) __extension__ ({ \
+  uint16x8_t __s0_570 = __p0_570; \
+  uint16x8_t __s1_570 = __p1_570; \
+  uint32x4_t __ret_570; \
+  __ret_570 = vmull_u16(vget_high_u16(__s0_570), splat_laneq_u16(__s1_570, __p2_570)); \
+  __ret_570; \
 })
 #else
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_u16(__p0_571, __p1_571, __p2_571) __extension__ ({ \
+  uint16x8_t __s0_571 = __p0_571; \
+  uint16x8_t __s1_571 = __p1_571; \
+  uint16x8_t __rev0_571;  __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_571;  __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_571; \
+  __ret_571 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_571), __noswap_splat_laneq_u16(__rev1_571, __p2_571)); \
+  __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \
+  __ret_571; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_s32(__p0_572, __p1_572, __p2_572) __extension__ ({ \
+  int32x4_t __s0_572 = __p0_572; \
+  int32x4_t __s1_572 = __p1_572; \
+  int64x2_t __ret_572; \
+  __ret_572 = vmull_s32(vget_high_s32(__s0_572), splat_laneq_s32(__s1_572, __p2_572)); \
+  __ret_572; \
 })
 #else
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_s32(__p0_573, __p1_573, __p2_573) __extension__ ({ \
+  int32x4_t __s0_573 = __p0_573; \
+  int32x4_t __s1_573 = __p1_573; \
+  int32x4_t __rev0_573;  __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \
+  int32x4_t __rev1_573;  __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \
+  int64x2_t __ret_573; \
+  __ret_573 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_573), __noswap_splat_laneq_s32(__rev1_573, __p2_573)); \
+  __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \
+  __ret_573; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_high_laneq_s16(__p0_574, __p1_574, __p2_574) __extension__ ({ \
+  int16x8_t __s0_574 = __p0_574; \
+  int16x8_t __s1_574 = __p1_574; \
+  int32x4_t __ret_574; \
+  __ret_574 = vmull_s16(vget_high_s16(__s0_574), splat_laneq_s16(__s1_574, __p2_574)); \
+  __ret_574; \
 })
 #else
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_high_laneq_s16(__p0_575, __p1_575, __p2_575) __extension__ ({ \
+  int16x8_t __s0_575 = __p0_575; \
+  int16x8_t __s1_575 = __p1_575; \
+  int16x8_t __rev0_575;  __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_575;  __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_575; \
+  __ret_575 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_575), __noswap_splat_laneq_s16(__rev1_575, __p2_575)); \
+  __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \
+  __ret_575; \
 })
 #endif
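
The vmull_high_lane_* / vmull_high_laneq_* family combines both idioms: take the high half of the first operand with vget_high_*, broadcast a lane of the second with splat_lane_* / splat_laneq_*, then perform a widening multiply via vmull_*. Usage sketch, assuming AArch64:

#include <arm_neon.h>

/* Widening multiply of the high two s32 lanes of a by lane 1 of c. */
int64x2_t widen_mul_high(int32x4_t a, int32x4_t c) {
  return vmull_high_laneq_s32(a, c, 1);
}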
 
@@ -50903,86 +54703,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_u32(__p0_576, __p1_576, __p2_576) __extension__ ({ \
+  uint32x2_t __s0_576 = __p0_576; \
+  uint32x4_t __s1_576 = __p1_576; \
+  uint64x2_t __ret_576; \
+  __ret_576 = vmull_u32(__s0_576, splat_laneq_u32(__s1_576, __p2_576)); \
+  __ret_576; \
 })
 #else
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_laneq_u32(__p0_577, __p1_577, __p2_577) __extension__ ({ \
+  uint32x2_t __s0_577 = __p0_577; \
+  uint32x4_t __s1_577 = __p1_577; \
+  uint32x2_t __rev0_577;  __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 1, 0); \
+  uint32x4_t __rev1_577;  __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \
+  uint64x2_t __ret_577; \
+  __ret_577 = __noswap_vmull_u32(__rev0_577, __noswap_splat_laneq_u32(__rev1_577, __p2_577)); \
+  __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \
+  __ret_577; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_u16(__p0_578, __p1_578, __p2_578) __extension__ ({ \
+  uint16x4_t __s0_578 = __p0_578; \
+  uint16x8_t __s1_578 = __p1_578; \
+  uint32x4_t __ret_578; \
+  __ret_578 = vmull_u16(__s0_578, splat_laneq_u16(__s1_578, __p2_578)); \
+  __ret_578; \
 })
 #else
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_laneq_u16(__p0_579, __p1_579, __p2_579) __extension__ ({ \
+  uint16x4_t __s0_579 = __p0_579; \
+  uint16x8_t __s1_579 = __p1_579; \
+  uint16x4_t __rev0_579;  __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 3, 2, 1, 0); \
+  uint16x8_t __rev1_579;  __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_579; \
+  __ret_579 = __noswap_vmull_u16(__rev0_579, __noswap_splat_laneq_u16(__rev1_579, __p2_579)); \
+  __ret_579 = __builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \
+  __ret_579; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_s32(__p0_580, __p1_580, __p2_580) __extension__ ({ \
+  int32x2_t __s0_580 = __p0_580; \
+  int32x4_t __s1_580 = __p1_580; \
+  int64x2_t __ret_580; \
+  __ret_580 = vmull_s32(__s0_580, splat_laneq_s32(__s1_580, __p2_580)); \
+  __ret_580; \
 })
 #else
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmull_laneq_s32(__p0_581, __p1_581, __p2_581) __extension__ ({ \
+  int32x2_t __s0_581 = __p0_581; \
+  int32x4_t __s1_581 = __p1_581; \
+  int32x2_t __rev0_581;  __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \
+  int32x4_t __rev1_581;  __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \
+  int64x2_t __ret_581; \
+  __ret_581 = __noswap_vmull_s32(__rev0_581, __noswap_splat_laneq_s32(__rev1_581, __p2_581)); \
+  __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \
+  __ret_581; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmull_laneq_s16(__p0_582, __p1_582, __p2_582) __extension__ ({ \
+  int16x4_t __s0_582 = __p0_582; \
+  int16x8_t __s1_582 = __p1_582; \
+  int32x4_t __ret_582; \
+  __ret_582 = vmull_s16(__s0_582, splat_laneq_s16(__s1_582, __p2_582)); \
+  __ret_582; \
 })
 #else
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmull_laneq_s16(__p0_583, __p1_583, __p2_583) __extension__ ({ \
+  int16x4_t __s0_583 = __p0_583; \
+  int16x8_t __s1_583 = __p1_583; \
+  int16x4_t __rev0_583;  __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \
+  int16x8_t __rev1_583;  __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_583; \
+  __ret_583 = __noswap_vmull_s16(__rev0_583, __noswap_splat_laneq_s16(__rev1_583, __p2_583)); \
+  __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \
+  __ret_583; \
 })
 #endif
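/* Editor's note (annotation, not part of the patch): the rewritten macros
 * above make two mechanical changes.  First, every expansion-local name gains
 * a unique numeric suffix (__p0 -> __p0_579, __s0 -> __s0_579, ...) so nested
 * expansion of these statement-expression macros cannot capture or shadow an
 * outer expansion's temporaries.  Second, the open-coded lane broadcast
 * __builtin_shufflevector(__s1, __s1, __p2, __p2, ...) is replaced by named
 * splat_lane and splat_laneq helpers (and their __noswap_ twins) generated
 * earlier in the header.  A minimal sketch of what such a helper does,
 * assuming it keeps the same shuffle pattern (hypothetical name, illustrative
 * only):
 */
#if 0
#define my_splat_laneq_s16(__v, __lane) \
  __builtin_shufflevector((__v), (__v), (__lane), (__lane), (__lane), (__lane))
#endif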
 
@@ -51067,192 +54867,192 @@
   __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
   return __ret;
 }
-#define vmulxd_lane_f64(__p0_145, __p1_145, __p2_145) __extension__ ({ \
-  float64_t __s0_145 = __p0_145; \
-  float64x1_t __s1_145 = __p1_145; \
-  float64_t __ret_145; \
-  __ret_145 = vmulxd_f64(__s0_145, vget_lane_f64(__s1_145, __p2_145)); \
-  __ret_145; \
+#define vmulxd_lane_f64(__p0_584, __p1_584, __p2_584) __extension__ ({ \
+  float64_t __s0_584 = __p0_584; \
+  float64x1_t __s1_584 = __p1_584; \
+  float64_t __ret_584; \
+  __ret_584 = vmulxd_f64(__s0_584, vget_lane_f64(__s1_584, __p2_584)); \
+  __ret_584; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_146, __p1_146, __p2_146) __extension__ ({ \
-  float32_t __s0_146 = __p0_146; \
-  float32x2_t __s1_146 = __p1_146; \
-  float32_t __ret_146; \
-  __ret_146 = vmulxs_f32(__s0_146, vget_lane_f32(__s1_146, __p2_146)); \
-  __ret_146; \
+#define vmulxs_lane_f32(__p0_585, __p1_585, __p2_585) __extension__ ({ \
+  float32_t __s0_585 = __p0_585; \
+  float32x2_t __s1_585 = __p1_585; \
+  float32_t __ret_585; \
+  __ret_585 = vmulxs_f32(__s0_585, vget_lane_f32(__s1_585, __p2_585)); \
+  __ret_585; \
 })
 #else
-#define vmulxs_lane_f32(__p0_147, __p1_147, __p2_147) __extension__ ({ \
-  float32_t __s0_147 = __p0_147; \
-  float32x2_t __s1_147 = __p1_147; \
-  float32x2_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
-  float32_t __ret_147; \
-  __ret_147 = vmulxs_f32(__s0_147, __noswap_vget_lane_f32(__rev1_147, __p2_147)); \
-  __ret_147; \
+#define vmulxs_lane_f32(__p0_586, __p1_586, __p2_586) __extension__ ({ \
+  float32_t __s0_586 = __p0_586; \
+  float32x2_t __s1_586 = __p1_586; \
+  float32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
+  float32_t __ret_586; \
+  __ret_586 = vmulxs_f32(__s0_586, __noswap_vget_lane_f32(__rev1_586, __p2_586)); \
+  __ret_586; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulxq_lane_f64(__p0_587, __p1_587, __p2_587) __extension__ ({ \
+  float64x2_t __s0_587 = __p0_587; \
+  float64x1_t __s1_587 = __p1_587; \
+  float64x2_t __ret_587; \
+  __ret_587 = vmulxq_f64(__s0_587, splatq_lane_f64(__s1_587, __p2_587)); \
+  __ret_587; \
 })
 #else
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulxq_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \
+  float64x2_t __s0_588 = __p0_588; \
+  float64x1_t __s1_588 = __p1_588; \
+  float64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
+  float64x2_t __ret_588; \
+  __ret_588 = __noswap_vmulxq_f64(__rev0_588, __noswap_splatq_lane_f64(__s1_588, __p2_588)); \
+  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
+  __ret_588; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \
+  float32x4_t __s0_589 = __p0_589; \
+  float32x2_t __s1_589 = __p1_589; \
+  float32x4_t __ret_589; \
+  __ret_589 = vmulxq_f32(__s0_589, splatq_lane_f32(__s1_589, __p2_589)); \
+  __ret_589; \
 })
 #else
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \
+  float32x4_t __s0_590 = __p0_590; \
+  float32x2_t __s1_590 = __p1_590; \
+  float32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
+  float32x2_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \
+  float32x4_t __ret_590; \
+  __ret_590 = __noswap_vmulxq_f32(__rev0_590, __noswap_splatq_lane_f32(__rev1_590, __p2_590)); \
+  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
+  __ret_590; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulx_lane_f32(__p0_591, __p1_591, __p2_591) __extension__ ({ \
+  float32x2_t __s0_591 = __p0_591; \
+  float32x2_t __s1_591 = __p1_591; \
+  float32x2_t __ret_591; \
+  __ret_591 = vmulx_f32(__s0_591, splat_lane_f32(__s1_591, __p2_591)); \
+  __ret_591; \
 })
 #else
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulx_lane_f32(__p0_592, __p1_592, __p2_592) __extension__ ({ \
+  float32x2_t __s0_592 = __p0_592; \
+  float32x2_t __s1_592 = __p1_592; \
+  float32x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
+  float32x2_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \
+  float32x2_t __ret_592; \
+  __ret_592 = __noswap_vmulx_f32(__rev0_592, __noswap_splat_lane_f32(__rev1_592, __p2_592)); \
+  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
+  __ret_592; \
 })
 #endif
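/* Editor's note: the #else branches above show the header's unchanged
 * big-endian strategy: reverse each input's lane order, compute with a
 * __noswap_ variant that assumes little-endian layout, then reverse the
 * result back.  A hedged sketch of the same dance for one two-lane case
 * (hypothetical wrapper name, illustrative only):
 */
#if 0
static inline float32x2_t my_mulx_lane0_big_endian(float32x2_t a, float32x2_t b) {
  float32x2_t ra = __builtin_shufflevector(a, a, 1, 0); /* reverse lanes */
  float32x2_t rb = __builtin_shufflevector(b, b, 1, 0);
  float32x2_t r  = __noswap_vmulx_f32(ra, __noswap_splat_lane_f32(rb, 0));
  return __builtin_shufflevector(r, r, 1, 0);           /* restore order */
}
#endif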
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_148, __p1_148, __p2_148) __extension__ ({ \
-  float64_t __s0_148 = __p0_148; \
-  float64x2_t __s1_148 = __p1_148; \
-  float64_t __ret_148; \
-  __ret_148 = vmulxd_f64(__s0_148, vgetq_lane_f64(__s1_148, __p2_148)); \
-  __ret_148; \
+#define vmulxd_laneq_f64(__p0_593, __p1_593, __p2_593) __extension__ ({ \
+  float64_t __s0_593 = __p0_593; \
+  float64x2_t __s1_593 = __p1_593; \
+  float64_t __ret_593; \
+  __ret_593 = vmulxd_f64(__s0_593, vgetq_lane_f64(__s1_593, __p2_593)); \
+  __ret_593; \
 })
 #else
-#define vmulxd_laneq_f64(__p0_149, __p1_149, __p2_149) __extension__ ({ \
-  float64_t __s0_149 = __p0_149; \
-  float64x2_t __s1_149 = __p1_149; \
-  float64x2_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 1, 0); \
-  float64_t __ret_149; \
-  __ret_149 = vmulxd_f64(__s0_149, __noswap_vgetq_lane_f64(__rev1_149, __p2_149)); \
-  __ret_149; \
+#define vmulxd_laneq_f64(__p0_594, __p1_594, __p2_594) __extension__ ({ \
+  float64_t __s0_594 = __p0_594; \
+  float64x2_t __s1_594 = __p1_594; \
+  float64x2_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \
+  float64_t __ret_594; \
+  __ret_594 = vmulxd_f64(__s0_594, __noswap_vgetq_lane_f64(__rev1_594, __p2_594)); \
+  __ret_594; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_150, __p1_150, __p2_150) __extension__ ({ \
-  float32_t __s0_150 = __p0_150; \
-  float32x4_t __s1_150 = __p1_150; \
-  float32_t __ret_150; \
-  __ret_150 = vmulxs_f32(__s0_150, vgetq_lane_f32(__s1_150, __p2_150)); \
-  __ret_150; \
+#define vmulxs_laneq_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \
+  float32_t __s0_595 = __p0_595; \
+  float32x4_t __s1_595 = __p1_595; \
+  float32_t __ret_595; \
+  __ret_595 = vmulxs_f32(__s0_595, vgetq_lane_f32(__s1_595, __p2_595)); \
+  __ret_595; \
 })
 #else
-#define vmulxs_laneq_f32(__p0_151, __p1_151, __p2_151) __extension__ ({ \
-  float32_t __s0_151 = __p0_151; \
-  float32x4_t __s1_151 = __p1_151; \
-  float32x4_t __rev1_151;  __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \
-  float32_t __ret_151; \
-  __ret_151 = vmulxs_f32(__s0_151, __noswap_vgetq_lane_f32(__rev1_151, __p2_151)); \
-  __ret_151; \
+#define vmulxs_laneq_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \
+  float32_t __s0_596 = __p0_596; \
+  float32x4_t __s1_596 = __p1_596; \
+  float32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
+  float32_t __ret_596; \
+  __ret_596 = vmulxs_f32(__s0_596, __noswap_vgetq_lane_f32(__rev1_596, __p2_596)); \
+  __ret_596; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulxq_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \
+  float64x2_t __s0_597 = __p0_597; \
+  float64x2_t __s1_597 = __p1_597; \
+  float64x2_t __ret_597; \
+  __ret_597 = vmulxq_f64(__s0_597, splatq_laneq_f64(__s1_597, __p2_597)); \
+  __ret_597; \
 })
 #else
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulxq_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \
+  float64x2_t __s0_598 = __p0_598; \
+  float64x2_t __s1_598 = __p1_598; \
+  float64x2_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 1, 0); \
+  float64x2_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \
+  float64x2_t __ret_598; \
+  __ret_598 = __noswap_vmulxq_f64(__rev0_598, __noswap_splatq_laneq_f64(__rev1_598, __p2_598)); \
+  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 1, 0); \
+  __ret_598; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vmulxq_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \
+  float32x4_t __s0_599 = __p0_599; \
+  float32x4_t __s1_599 = __p1_599; \
+  float32x4_t __ret_599; \
+  __ret_599 = vmulxq_f32(__s0_599, splatq_laneq_f32(__s1_599, __p2_599)); \
+  __ret_599; \
 })
 #else
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmulxq_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \
+  float32x4_t __s0_600 = __p0_600; \
+  float32x4_t __s1_600 = __p1_600; \
+  float32x4_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 3, 2, 1, 0); \
+  float32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
+  float32x4_t __ret_600; \
+  __ret_600 = __noswap_vmulxq_f32(__rev0_600, __noswap_splatq_laneq_f32(__rev1_600, __p2_600)); \
+  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 3, 2, 1, 0); \
+  __ret_600; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vmulx_laneq_f32(__p0_601, __p1_601, __p2_601) __extension__ ({ \
+  float32x2_t __s0_601 = __p0_601; \
+  float32x4_t __s1_601 = __p1_601; \
+  float32x2_t __ret_601; \
+  __ret_601 = vmulx_f32(__s0_601, splat_laneq_f32(__s1_601, __p2_601)); \
+  __ret_601; \
 })
 #else
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmulx_laneq_f32(__p0_602, __p1_602, __p2_602) __extension__ ({ \
+  float32x2_t __s0_602 = __p0_602; \
+  float32x4_t __s1_602 = __p1_602; \
+  float32x2_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \
+  float32x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
+  float32x2_t __ret_602; \
+  __ret_602 = __noswap_vmulx_f32(__rev0_602, __noswap_splat_laneq_f32(__rev1_602, __p2_602)); \
+  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \
+  __ret_602; \
 })
 #endif
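/* Editor's note: a usage sketch for the vmulx_laneq_f32 form defined above.
 * vmulx follows FMULX semantics (believed to return +/-2.0 for +/-0 times
 * +/-Inf instead of NaN); the lane index must be a compile-time constant
 * because the macro forwards it into a splat.  Hypothetical function name,
 * illustrative only:
 */
#if 0
static inline float32x2_t scale_by_table_lane3(float32x2_t v, float32x4_t table) {
  return vmulx_laneq_f32(v, table, 3);
}
#endif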
 
@@ -52155,98 +55955,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_lane_s32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
+  int64x2_t __s0_603 = __p0_603; \
+  int32x4_t __s1_603 = __p1_603; \
+  int32x2_t __s2_603 = __p2_603; \
+  int64x2_t __ret_603; \
+  __ret_603 = vqdmlal_s32(__s0_603, vget_high_s32(__s1_603), splat_lane_s32(__s2_603, __p3_603)); \
+  __ret_603; \
 })
 #else
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_high_lane_s32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
+  int64x2_t __s0_604 = __p0_604; \
+  int32x4_t __s1_604 = __p1_604; \
+  int32x2_t __s2_604 = __p2_604; \
+  int64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
+  int32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
+  int32x2_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 1, 0); \
+  int64x2_t __ret_604; \
+  __ret_604 = __noswap_vqdmlal_s32(__rev0_604, __noswap_vget_high_s32(__rev1_604), __noswap_splat_lane_s32(__rev2_604, __p3_604)); \
+  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
+  __ret_604; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_lane_s16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
+  int32x4_t __s0_605 = __p0_605; \
+  int16x8_t __s1_605 = __p1_605; \
+  int16x4_t __s2_605 = __p2_605; \
+  int32x4_t __ret_605; \
+  __ret_605 = vqdmlal_s16(__s0_605, vget_high_s16(__s1_605), splat_lane_s16(__s2_605, __p3_605)); \
+  __ret_605; \
 })
 #else
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_high_lane_s16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
+  int32x4_t __s0_606 = __p0_606; \
+  int16x8_t __s1_606 = __p1_606; \
+  int16x4_t __s2_606 = __p2_606; \
+  int32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
+  int16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 3, 2, 1, 0); \
+  int32x4_t __ret_606; \
+  __ret_606 = __noswap_vqdmlal_s16(__rev0_606, __noswap_vget_high_s16(__rev1_606), __noswap_splat_lane_s16(__rev2_606, __p3_606)); \
+  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
+  __ret_606; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
+  int64x2_t __s0_607 = __p0_607; \
+  int32x4_t __s1_607 = __p1_607; \
+  int32x4_t __s2_607 = __p2_607; \
+  int64x2_t __ret_607; \
+  __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \
+  __ret_607; \
 })
 #else
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
+  int64x2_t __s0_608 = __p0_608; \
+  int32x4_t __s1_608 = __p1_608; \
+  int32x4_t __s2_608 = __p2_608; \
+  int64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
+  int32x4_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \
+  int32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
+  int64x2_t __ret_608; \
+  __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \
+  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
+  __ret_608; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
+  int32x4_t __s0_609 = __p0_609; \
+  int16x8_t __s1_609 = __p1_609; \
+  int16x8_t __s2_609 = __p2_609; \
+  int32x4_t __ret_609; \
+  __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \
+  __ret_609; \
 })
 #else
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
+  int32x4_t __s0_610 = __p0_610; \
+  int16x8_t __s1_610 = __p1_610; \
+  int16x8_t __s2_610 = __p2_610; \
+  int32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
+  int16x8_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_610; \
+  __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \
+  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
+  __ret_610; \
 })
 #endif
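/* Editor's note: the vqdmlal_high_lane and vqdmlal_high_laneq macros above
 * chain three steps -- take the high half of __s1, broadcast lane __p3 of
 * __s2, then do a saturating doubling multiply-accumulate.  A scalar sketch
 * of the per-lane arithmetic (hedged: the real instruction also saturates
 * the doubling step itself when both inputs are INT32_MIN; needs <stdint.h>
 * and the __int128 extension):
 */
#if 0
static inline int64_t my_qdmlal_elem(int64_t acc, int32_t b_hi, int32_t c_lane) {
  __int128 t = (__int128)acc + 2 * (__int128)b_hi * c_lane;
  return t > INT64_MAX ? INT64_MAX : t < INT64_MIN ? INT64_MIN : (int64_t)t;
}
#endif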
 
@@ -52369,50 +56169,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
+  int64x2_t __s0_611 = __p0_611; \
+  int32x2_t __s1_611 = __p1_611; \
+  int32x4_t __s2_611 = __p2_611; \
+  int64x2_t __ret_611; \
+  __ret_611 = vqdmlal_s32(__s0_611, __s1_611, splat_laneq_s32(__s2_611, __p3_611)); \
+  __ret_611; \
 })
 #else
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlal_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
+  int64x2_t __s0_612 = __p0_612; \
+  int32x2_t __s1_612 = __p1_612; \
+  int32x4_t __s2_612 = __p2_612; \
+  int64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
+  int32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
+  int32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
+  int64x2_t __ret_612; \
+  __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __rev1_612, __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \
+  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
+  __ret_612; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlal_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
+  int32x4_t __s0_613 = __p0_613; \
+  int16x4_t __s1_613 = __p1_613; \
+  int16x8_t __s2_613 = __p2_613; \
+  int32x4_t __ret_613; \
+  __ret_613 = vqdmlal_s16(__s0_613, __s1_613, splat_laneq_s16(__s2_613, __p3_613)); \
+  __ret_613; \
 })
 #else
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlal_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
+  int32x4_t __s0_614 = __p0_614; \
+  int16x4_t __s1_614 = __p1_614; \
+  int16x8_t __s2_614 = __p2_614; \
+  int32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
+  int16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
+  int16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_614; \
+  __ret_614 = __noswap_vqdmlal_s16(__rev0_614, __rev1_614, __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \
+  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
+  __ret_614; \
 })
 #endif
 
@@ -52463,98 +56263,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_lane_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \
+  int64x2_t __s0_615 = __p0_615; \
+  int32x4_t __s1_615 = __p1_615; \
+  int32x2_t __s2_615 = __p2_615; \
+  int64x2_t __ret_615; \
+  __ret_615 = vqdmlsl_s32(__s0_615, vget_high_s32(__s1_615), splat_lane_s32(__s2_615, __p3_615)); \
+  __ret_615; \
 })
 #else
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_lane_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \
+  int64x2_t __s0_616 = __p0_616; \
+  int32x4_t __s1_616 = __p1_616; \
+  int32x2_t __s2_616 = __p2_616; \
+  int64x2_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \
+  int32x4_t __rev1_616;  __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 3, 2, 1, 0); \
+  int32x2_t __rev2_616;  __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 1, 0); \
+  int64x2_t __ret_616; \
+  __ret_616 = __noswap_vqdmlsl_s32(__rev0_616, __noswap_vget_high_s32(__rev1_616), __noswap_splat_lane_s32(__rev2_616, __p3_616)); \
+  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \
+  __ret_616; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_lane_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \
+  int32x4_t __s0_617 = __p0_617; \
+  int16x8_t __s1_617 = __p1_617; \
+  int16x4_t __s2_617 = __p2_617; \
+  int32x4_t __ret_617; \
+  __ret_617 = vqdmlsl_s16(__s0_617, vget_high_s16(__s1_617), splat_lane_s16(__s2_617, __p3_617)); \
+  __ret_617; \
 })
 #else
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_lane_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \
+  int32x4_t __s0_618 = __p0_618; \
+  int16x8_t __s1_618 = __p1_618; \
+  int16x4_t __s2_618 = __p2_618; \
+  int32x4_t __rev0_618;  __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \
+  int16x8_t __rev1_618;  __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev2_618;  __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 3, 2, 1, 0); \
+  int32x4_t __ret_618; \
+  __ret_618 = __noswap_vqdmlsl_s16(__rev0_618, __noswap_vget_high_s16(__rev1_618), __noswap_splat_lane_s16(__rev2_618, __p3_618)); \
+  __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \
+  __ret_618; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_laneq_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \
+  int64x2_t __s0_619 = __p0_619; \
+  int32x4_t __s1_619 = __p1_619; \
+  int32x4_t __s2_619 = __p2_619; \
+  int64x2_t __ret_619; \
+  __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_laneq_s32(__s2_619, __p3_619)); \
+  __ret_619; \
 })
 #else
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_laneq_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \
+  int64x2_t __s0_620 = __p0_620; \
+  int32x4_t __s1_620 = __p1_620; \
+  int32x4_t __s2_620 = __p2_620; \
+  int64x2_t __rev0_620;  __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \
+  int32x4_t __rev1_620;  __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \
+  int32x4_t __rev2_620;  __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 3, 2, 1, 0); \
+  int64x2_t __ret_620; \
+  __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_laneq_s32(__rev2_620, __p3_620)); \
+  __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \
+  __ret_620; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_high_laneq_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \
+  int32x4_t __s0_621 = __p0_621; \
+  int16x8_t __s1_621 = __p1_621; \
+  int16x8_t __s2_621 = __p2_621; \
+  int32x4_t __ret_621; \
+  __ret_621 = vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_laneq_s16(__s2_621, __p3_621)); \
+  __ret_621; \
 })
 #else
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_high_laneq_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \
+  int32x4_t __s0_622 = __p0_622; \
+  int16x8_t __s1_622 = __p1_622; \
+  int16x8_t __s2_622 = __p2_622; \
+  int32x4_t __rev0_622;  __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \
+  int16x8_t __rev1_622;  __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev2_622;  __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_622; \
+  __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_laneq_s16(__rev2_622, __p3_622)); \
+  __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \
+  __ret_622; \
 })
 #endif
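/* Editor's note: the vqdmlsl_* hunks above mirror the vqdmlal_* rewrites
 * one-for-one (same suffix renumbering, same substitution of splat_lane and
 * __noswap_splat_lane helpers); the only semantic difference is a saturating
 * subtraction of the doubled product instead of an addition. */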
 
@@ -52677,50 +56477,50 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \
+  int64x2_t __s0_623 = __p0_623; \
+  int32x2_t __s1_623 = __p1_623; \
+  int32x4_t __s2_623 = __p2_623; \
+  int64x2_t __ret_623; \
+  __ret_623 = vqdmlsl_s32(__s0_623, __s1_623, splat_laneq_s32(__s2_623, __p3_623)); \
+  __ret_623; \
 })
 #else
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmlsl_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \
+  int64x2_t __s0_624 = __p0_624; \
+  int32x2_t __s1_624 = __p1_624; \
+  int32x4_t __s2_624 = __p2_624; \
+  int64x2_t __rev0_624;  __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \
+  int32x2_t __rev1_624;  __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 1, 0); \
+  int32x4_t __rev2_624;  __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \
+  int64x2_t __ret_624; \
+  __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __rev1_624, __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \
+  __ret_624 = __builtin_shufflevector(__ret_624, __ret_624, 1, 0); \
+  __ret_624; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vqdmlsl_laneq_s16(__p0_625, __p1_625, __p2_625, __p3_625) __extension__ ({ \
+  int32x4_t __s0_625 = __p0_625; \
+  int16x4_t __s1_625 = __p1_625; \
+  int16x8_t __s2_625 = __p2_625; \
+  int32x4_t __ret_625; \
+  __ret_625 = vqdmlsl_s16(__s0_625, __s1_625, splat_laneq_s16(__s2_625, __p3_625)); \
+  __ret_625; \
 })
 #else
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmlsl_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \
+  int32x4_t __s0_626 = __p0_626; \
+  int16x4_t __s1_626 = __p1_626; \
+  int16x8_t __s2_626 = __p2_626; \
+  int32x4_t __rev0_626;  __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \
+  int16x4_t __rev1_626;  __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 3, 2, 1, 0); \
+  int16x8_t __rev2_626;  __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_626; \
+  __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __rev1_626, __noswap_splat_laneq_s16(__rev2_626, __p3_626)); \
+  __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \
+  __ret_626; \
 })
 #endif
 
@@ -52739,7 +56539,7 @@
   int32x4_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
@@ -52749,7 +56549,7 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -52760,7 +56560,7 @@
   int16x8_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
@@ -52770,7 +56570,7 @@
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
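/* Editor's note: the four small hunks above change only the trailing constant
 * passed to __builtin_neon_vqdmulhq_lane_v (34 -> 2 for the s32 form, 33 -> 1
 * for the s16 form).  That argument is clang's internal NEON type
 * discriminator; reading 32 as the quad-register bit, the patch presumably
 * re-keys the builtin on the 64-bit lane operand's type (int32x2_t /
 * int16x4_t) rather than the 128-bit operand, matching the builtin's updated
 * signature in 11.0.5.  This is an inference from the encoding, not something
 * the patch states. */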
@@ -52819,78 +56619,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
-  int32_t __s0_152 = __p0_152; \
-  int32x2_t __s1_152 = __p1_152; \
-  int32_t __ret_152; \
-  __ret_152 = vqdmulhs_s32(__s0_152, vget_lane_s32(__s1_152, __p2_152)); \
-  __ret_152; \
+#define vqdmulhs_lane_s32(__p0_627, __p1_627, __p2_627) __extension__ ({ \
+  int32_t __s0_627 = __p0_627; \
+  int32x2_t __s1_627 = __p1_627; \
+  int32_t __ret_627; \
+  __ret_627 = vqdmulhs_s32(__s0_627, vget_lane_s32(__s1_627, __p2_627)); \
+  __ret_627; \
 })
 #else
-#define vqdmulhs_lane_s32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
-  int32_t __s0_153 = __p0_153; \
-  int32x2_t __s1_153 = __p1_153; \
-  int32x2_t __rev1_153;  __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 1, 0); \
-  int32_t __ret_153; \
-  __ret_153 = vqdmulhs_s32(__s0_153, __noswap_vget_lane_s32(__rev1_153, __p2_153)); \
-  __ret_153; \
+#define vqdmulhs_lane_s32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
+  int32_t __s0_628 = __p0_628; \
+  int32x2_t __s1_628 = __p1_628; \
+  int32x2_t __rev1_628;  __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \
+  int32_t __ret_628; \
+  __ret_628 = vqdmulhs_s32(__s0_628, __noswap_vget_lane_s32(__rev1_628, __p2_628)); \
+  __ret_628; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_154, __p1_154, __p2_154) __extension__ ({ \
-  int16_t __s0_154 = __p0_154; \
-  int16x4_t __s1_154 = __p1_154; \
-  int16_t __ret_154; \
-  __ret_154 = vqdmulhh_s16(__s0_154, vget_lane_s16(__s1_154, __p2_154)); \
-  __ret_154; \
+#define vqdmulhh_lane_s16(__p0_629, __p1_629, __p2_629) __extension__ ({ \
+  int16_t __s0_629 = __p0_629; \
+  int16x4_t __s1_629 = __p1_629; \
+  int16_t __ret_629; \
+  __ret_629 = vqdmulhh_s16(__s0_629, vget_lane_s16(__s1_629, __p2_629)); \
+  __ret_629; \
 })
 #else
-#define vqdmulhh_lane_s16(__p0_155, __p1_155, __p2_155) __extension__ ({ \
-  int16_t __s0_155 = __p0_155; \
-  int16x4_t __s1_155 = __p1_155; \
-  int16x4_t __rev1_155;  __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \
-  int16_t __ret_155; \
-  __ret_155 = vqdmulhh_s16(__s0_155, __noswap_vget_lane_s16(__rev1_155, __p2_155)); \
-  __ret_155; \
+#define vqdmulhh_lane_s16(__p0_630, __p1_630, __p2_630) __extension__ ({ \
+  int16_t __s0_630 = __p0_630; \
+  int16x4_t __s1_630 = __p1_630; \
+  int16x4_t __rev1_630;  __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \
+  int16_t __ret_630; \
+  __ret_630 = vqdmulhh_s16(__s0_630, __noswap_vget_lane_s16(__rev1_630, __p2_630)); \
+  __ret_630; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
-  int32_t __s0_156 = __p0_156; \
-  int32x4_t __s1_156 = __p1_156; \
-  int32_t __ret_156; \
-  __ret_156 = vqdmulhs_s32(__s0_156, vgetq_lane_s32(__s1_156, __p2_156)); \
-  __ret_156; \
+#define vqdmulhs_laneq_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
+  int32_t __s0_631 = __p0_631; \
+  int32x4_t __s1_631 = __p1_631; \
+  int32_t __ret_631; \
+  __ret_631 = vqdmulhs_s32(__s0_631, vgetq_lane_s32(__s1_631, __p2_631)); \
+  __ret_631; \
 })
 #else
-#define vqdmulhs_laneq_s32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
-  int32_t __s0_157 = __p0_157; \
-  int32x4_t __s1_157 = __p1_157; \
-  int32x4_t __rev1_157;  __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
-  int32_t __ret_157; \
-  __ret_157 = vqdmulhs_s32(__s0_157, __noswap_vgetq_lane_s32(__rev1_157, __p2_157)); \
-  __ret_157; \
+#define vqdmulhs_laneq_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \
+  int32_t __s0_632 = __p0_632; \
+  int32x4_t __s1_632 = __p1_632; \
+  int32x4_t __rev1_632;  __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 3, 2, 1, 0); \
+  int32_t __ret_632; \
+  __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vgetq_lane_s32(__rev1_632, __p2_632)); \
+  __ret_632; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_158, __p1_158, __p2_158) __extension__ ({ \
-  int16_t __s0_158 = __p0_158; \
-  int16x8_t __s1_158 = __p1_158; \
-  int16_t __ret_158; \
-  __ret_158 = vqdmulhh_s16(__s0_158, vgetq_lane_s16(__s1_158, __p2_158)); \
-  __ret_158; \
+#define vqdmulhh_laneq_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
+  int16_t __s0_633 = __p0_633; \
+  int16x8_t __s1_633 = __p1_633; \
+  int16_t __ret_633; \
+  __ret_633 = vqdmulhh_s16(__s0_633, vgetq_lane_s16(__s1_633, __p2_633)); \
+  __ret_633; \
 })
 #else
-#define vqdmulhh_laneq_s16(__p0_159, __p1_159, __p2_159) __extension__ ({ \
-  int16_t __s0_159 = __p0_159; \
-  int16x8_t __s1_159 = __p1_159; \
-  int16x8_t __rev1_159;  __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_159; \
-  __ret_159 = vqdmulhh_s16(__s0_159, __noswap_vgetq_lane_s16(__rev1_159, __p2_159)); \
-  __ret_159; \
+#define vqdmulhh_laneq_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \
+  int16_t __s0_634 = __p0_634; \
+  int16x8_t __s1_634 = __p1_634; \
+  int16x8_t __rev1_634;  __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_634; \
+  __ret_634 = vqdmulhh_s16(__s0_634, __noswap_vgetq_lane_s16(__rev1_634, __p2_634)); \
+  __ret_634; \
 })
 #endif
 
@@ -53023,86 +56823,86 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_lane_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \
+  int32x4_t __s0_635 = __p0_635; \
+  int32x2_t __s1_635 = __p1_635; \
+  int64x2_t __ret_635; \
+  __ret_635 = vqdmull_s32(vget_high_s32(__s0_635), splat_lane_s32(__s1_635, __p2_635)); \
+  __ret_635; \
 })
 #else
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_high_lane_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
+  int32x4_t __s0_636 = __p0_636; \
+  int32x2_t __s1_636 = __p1_636; \
+  int32x4_t __rev0_636;  __rev0_636 = __builtin_shufflevector(__s0_636, __s0_636, 3, 2, 1, 0); \
+  int32x2_t __rev1_636;  __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 1, 0); \
+  int64x2_t __ret_636; \
+  __ret_636 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_636), __noswap_splat_lane_s32(__rev1_636, __p2_636)); \
+  __ret_636 = __builtin_shufflevector(__ret_636, __ret_636, 1, 0); \
+  __ret_636; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_lane_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \
+  int16x8_t __s0_637 = __p0_637; \
+  int16x4_t __s1_637 = __p1_637; \
+  int32x4_t __ret_637; \
+  __ret_637 = vqdmull_s16(vget_high_s16(__s0_637), splat_lane_s16(__s1_637, __p2_637)); \
+  __ret_637; \
 })
 #else
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_high_lane_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \
+  int16x8_t __s0_638 = __p0_638; \
+  int16x4_t __s1_638 = __p1_638; \
+  int16x8_t __rev0_638;  __rev0_638 = __builtin_shufflevector(__s0_638, __s0_638, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x4_t __rev1_638;  __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 3, 2, 1, 0); \
+  int32x4_t __ret_638; \
+  __ret_638 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_638), __noswap_splat_lane_s16(__rev1_638, __p2_638)); \
+  __ret_638 = __builtin_shufflevector(__ret_638, __ret_638, 3, 2, 1, 0); \
+  __ret_638; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
+  int32x4_t __s0_639 = __p0_639; \
+  int32x4_t __s1_639 = __p1_639; \
+  int64x2_t __ret_639; \
+  __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_laneq_s32(__s1_639, __p2_639)); \
+  __ret_639; \
 })
 #else
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_high_laneq_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \
+  int32x4_t __s0_640 = __p0_640; \
+  int32x4_t __s1_640 = __p1_640; \
+  int32x4_t __rev0_640;  __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \
+  int32x4_t __rev1_640;  __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 3, 2, 1, 0); \
+  int64x2_t __ret_640; \
+  __ret_640 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_laneq_s32(__rev1_640, __p2_640)); \
+  __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \
+  __ret_640; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_high_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
+  int16x8_t __s0_641 = __p0_641; \
+  int16x8_t __s1_641 = __p1_641; \
+  int32x4_t __ret_641; \
+  __ret_641 = vqdmull_s16(vget_high_s16(__s0_641), splat_laneq_s16(__s1_641, __p2_641)); \
+  __ret_641; \
 })
 #else
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_high_laneq_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \
+  int16x8_t __s0_642 = __p0_642; \
+  int16x8_t __s1_642 = __p1_642; \
+  int16x8_t __rev0_642;  __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_642;  __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_642; \
+  __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_laneq_s16(__rev1_642, __p2_642)); \
+  __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \
+  __ret_642; \
 })
 #endif
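/* Editor's note: vqdmull_high_laneq_s16 above is the fusion of two public
 * intrinsics plus a lane broadcast.  A hedged equivalent spelled with public
 * AArch64 intrinsics (hypothetical wrapper name, fixed lane for
 * illustration):
 */
#if 0
static inline int32x4_t my_qdmull_high_lane7(int16x8_t a, int16x8_t b) {
  return vqdmull_s16(vget_high_s16(a), vdup_laneq_s16(b, 7));
}
#endif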
 
@@ -53139,120 +56939,120 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_160, __p1_160, __p2_160) __extension__ ({ \
-  int32_t __s0_160 = __p0_160; \
-  int32x2_t __s1_160 = __p1_160; \
-  int64_t __ret_160; \
-  __ret_160 = vqdmulls_s32(__s0_160, vget_lane_s32(__s1_160, __p2_160)); \
-  __ret_160; \
+#define vqdmulls_lane_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
+  int32_t __s0_643 = __p0_643; \
+  int32x2_t __s1_643 = __p1_643; \
+  int64_t __ret_643; \
+  __ret_643 = vqdmulls_s32(__s0_643, vget_lane_s32(__s1_643, __p2_643)); \
+  __ret_643; \
 })
 #else
-#define vqdmulls_lane_s32(__p0_161, __p1_161, __p2_161) __extension__ ({ \
-  int32_t __s0_161 = __p0_161; \
-  int32x2_t __s1_161 = __p1_161; \
-  int32x2_t __rev1_161;  __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 1, 0); \
-  int64_t __ret_161; \
-  __ret_161 = vqdmulls_s32(__s0_161, __noswap_vget_lane_s32(__rev1_161, __p2_161)); \
-  __ret_161; \
+#define vqdmulls_lane_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \
+  int32_t __s0_644 = __p0_644; \
+  int32x2_t __s1_644 = __p1_644; \
+  int32x2_t __rev1_644;  __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 1, 0); \
+  int64_t __ret_644; \
+  __ret_644 = vqdmulls_s32(__s0_644, __noswap_vget_lane_s32(__rev1_644, __p2_644)); \
+  __ret_644; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_162, __p1_162, __p2_162) __extension__ ({ \
-  int16_t __s0_162 = __p0_162; \
-  int16x4_t __s1_162 = __p1_162; \
-  int32_t __ret_162; \
-  __ret_162 = vqdmullh_s16(__s0_162, vget_lane_s16(__s1_162, __p2_162)); \
-  __ret_162; \
+#define vqdmullh_lane_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
+  int16_t __s0_645 = __p0_645; \
+  int16x4_t __s1_645 = __p1_645; \
+  int32_t __ret_645; \
+  __ret_645 = vqdmullh_s16(__s0_645, vget_lane_s16(__s1_645, __p2_645)); \
+  __ret_645; \
 })
 #else
-#define vqdmullh_lane_s16(__p0_163, __p1_163, __p2_163) __extension__ ({ \
-  int16_t __s0_163 = __p0_163; \
-  int16x4_t __s1_163 = __p1_163; \
-  int16x4_t __rev1_163;  __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
-  int32_t __ret_163; \
-  __ret_163 = vqdmullh_s16(__s0_163, __noswap_vget_lane_s16(__rev1_163, __p2_163)); \
-  __ret_163; \
+#define vqdmullh_lane_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \
+  int16_t __s0_646 = __p0_646; \
+  int16x4_t __s1_646 = __p1_646; \
+  int16x4_t __rev1_646;  __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 3, 2, 1, 0); \
+  int32_t __ret_646; \
+  __ret_646 = vqdmullh_s16(__s0_646, __noswap_vget_lane_s16(__rev1_646, __p2_646)); \
+  __ret_646; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_164, __p1_164, __p2_164) __extension__ ({ \
-  int32_t __s0_164 = __p0_164; \
-  int32x4_t __s1_164 = __p1_164; \
-  int64_t __ret_164; \
-  __ret_164 = vqdmulls_s32(__s0_164, vgetq_lane_s32(__s1_164, __p2_164)); \
-  __ret_164; \
+#define vqdmulls_laneq_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
+  int32_t __s0_647 = __p0_647; \
+  int32x4_t __s1_647 = __p1_647; \
+  int64_t __ret_647; \
+  __ret_647 = vqdmulls_s32(__s0_647, vgetq_lane_s32(__s1_647, __p2_647)); \
+  __ret_647; \
 })
 #else
-#define vqdmulls_laneq_s32(__p0_165, __p1_165, __p2_165) __extension__ ({ \
-  int32_t __s0_165 = __p0_165; \
-  int32x4_t __s1_165 = __p1_165; \
-  int32x4_t __rev1_165;  __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \
-  int64_t __ret_165; \
-  __ret_165 = vqdmulls_s32(__s0_165, __noswap_vgetq_lane_s32(__rev1_165, __p2_165)); \
-  __ret_165; \
+#define vqdmulls_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
+  int32_t __s0_648 = __p0_648; \
+  int32x4_t __s1_648 = __p1_648; \
+  int32x4_t __rev1_648;  __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 3, 2, 1, 0); \
+  int64_t __ret_648; \
+  __ret_648 = vqdmulls_s32(__s0_648, __noswap_vgetq_lane_s32(__rev1_648, __p2_648)); \
+  __ret_648; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_166, __p1_166, __p2_166) __extension__ ({ \
-  int16_t __s0_166 = __p0_166; \
-  int16x8_t __s1_166 = __p1_166; \
-  int32_t __ret_166; \
-  __ret_166 = vqdmullh_s16(__s0_166, vgetq_lane_s16(__s1_166, __p2_166)); \
-  __ret_166; \
+#define vqdmullh_laneq_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \
+  int16_t __s0_649 = __p0_649; \
+  int16x8_t __s1_649 = __p1_649; \
+  int32_t __ret_649; \
+  __ret_649 = vqdmullh_s16(__s0_649, vgetq_lane_s16(__s1_649, __p2_649)); \
+  __ret_649; \
 })
 #else
-#define vqdmullh_laneq_s16(__p0_167, __p1_167, __p2_167) __extension__ ({ \
-  int16_t __s0_167 = __p0_167; \
-  int16x8_t __s1_167 = __p1_167; \
-  int16x8_t __rev1_167;  __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_167; \
-  __ret_167 = vqdmullh_s16(__s0_167, __noswap_vgetq_lane_s16(__rev1_167, __p2_167)); \
-  __ret_167; \
+#define vqdmullh_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
+  int16_t __s0_650 = __p0_650; \
+  int16x8_t __s1_650 = __p1_650; \
+  int16x8_t __rev1_650;  __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32_t __ret_650; \
+  __ret_650 = vqdmullh_s16(__s0_650, __noswap_vgetq_lane_s16(__rev1_650, __p2_650)); \
+  __ret_650; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
+#define vqdmull_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \
+  int32x2_t __s0_651 = __p0_651; \
+  int32x4_t __s1_651 = __p1_651; \
+  int64x2_t __ret_651; \
+  __ret_651 = vqdmull_s32(__s0_651, splat_laneq_s32(__s1_651, __p2_651)); \
+  __ret_651; \
 })
 #else
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vqdmull_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
+  int32x2_t __s0_652 = __p0_652; \
+  int32x4_t __s1_652 = __p1_652; \
+  int32x2_t __rev0_652;  __rev0_652 = __builtin_shufflevector(__s0_652, __s0_652, 1, 0); \
+  int32x4_t __rev1_652;  __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \
+  int64x2_t __ret_652; \
+  __ret_652 = __noswap_vqdmull_s32(__rev0_652, __noswap_splat_laneq_s32(__rev1_652, __p2_652)); \
+  __ret_652 = __builtin_shufflevector(__ret_652, __ret_652, 1, 0); \
+  __ret_652; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
+#define vqdmull_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \
+  int16x4_t __s0_653 = __p0_653; \
+  int16x8_t __s1_653 = __p1_653; \
+  int32x4_t __ret_653; \
+  __ret_653 = vqdmull_s16(__s0_653, splat_laneq_s16(__s1_653, __p2_653)); \
+  __ret_653; \
 })
 #else
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vqdmull_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
+  int16x4_t __s0_654 = __p0_654; \
+  int16x8_t __s1_654 = __p1_654; \
+  int16x4_t __rev0_654;  __rev0_654 = __builtin_shufflevector(__s0_654, __s0_654, 3, 2, 1, 0); \
+  int16x8_t __rev1_654;  __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_654; \
+  __ret_654 = __noswap_vqdmull_s16(__rev0_654, __noswap_splat_laneq_s16(__rev1_654, __p2_654)); \
+  __ret_654 = __builtin_shufflevector(__ret_654, __ret_654, 3, 2, 1, 0); \
+  __ret_654; \
 })
 #endif
 
@@ -53510,7 +57310,7 @@
   int32x4_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
   __ret; \
 })
 #else
@@ -53520,7 +57320,7 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 34); \
+  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -53531,7 +57331,7 @@
   int16x8_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
   __ret; \
 })
 #else
@@ -53541,7 +57341,7 @@
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 33); \
+  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -53590,78 +57390,78 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_168, __p1_168, __p2_168) __extension__ ({ \
-  int32_t __s0_168 = __p0_168; \
-  int32x2_t __s1_168 = __p1_168; \
-  int32_t __ret_168; \
-  __ret_168 = vqrdmulhs_s32(__s0_168, vget_lane_s32(__s1_168, __p2_168)); \
-  __ret_168; \
+#define vqrdmulhs_lane_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
+  int32_t __s0_655 = __p0_655; \
+  int32x2_t __s1_655 = __p1_655; \
+  int32_t __ret_655; \
+  __ret_655 = vqrdmulhs_s32(__s0_655, vget_lane_s32(__s1_655, __p2_655)); \
+  __ret_655; \
 })
 #else
-#define vqrdmulhs_lane_s32(__p0_169, __p1_169, __p2_169) __extension__ ({ \
-  int32_t __s0_169 = __p0_169; \
-  int32x2_t __s1_169 = __p1_169; \
-  int32x2_t __rev1_169;  __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 1, 0); \
-  int32_t __ret_169; \
-  __ret_169 = vqrdmulhs_s32(__s0_169, __noswap_vget_lane_s32(__rev1_169, __p2_169)); \
-  __ret_169; \
+#define vqrdmulhs_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
+  int32_t __s0_656 = __p0_656; \
+  int32x2_t __s1_656 = __p1_656; \
+  int32x2_t __rev1_656;  __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 1, 0); \
+  int32_t __ret_656; \
+  __ret_656 = vqrdmulhs_s32(__s0_656, __noswap_vget_lane_s32(__rev1_656, __p2_656)); \
+  __ret_656; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_170, __p1_170, __p2_170) __extension__ ({ \
-  int16_t __s0_170 = __p0_170; \
-  int16x4_t __s1_170 = __p1_170; \
-  int16_t __ret_170; \
-  __ret_170 = vqrdmulhh_s16(__s0_170, vget_lane_s16(__s1_170, __p2_170)); \
-  __ret_170; \
+#define vqrdmulhh_lane_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
+  int16_t __s0_657 = __p0_657; \
+  int16x4_t __s1_657 = __p1_657; \
+  int16_t __ret_657; \
+  __ret_657 = vqrdmulhh_s16(__s0_657, vget_lane_s16(__s1_657, __p2_657)); \
+  __ret_657; \
 })
 #else
-#define vqrdmulhh_lane_s16(__p0_171, __p1_171, __p2_171) __extension__ ({ \
-  int16_t __s0_171 = __p0_171; \
-  int16x4_t __s1_171 = __p1_171; \
-  int16x4_t __rev1_171;  __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
-  int16_t __ret_171; \
-  __ret_171 = vqrdmulhh_s16(__s0_171, __noswap_vget_lane_s16(__rev1_171, __p2_171)); \
-  __ret_171; \
+#define vqrdmulhh_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
+  int16_t __s0_658 = __p0_658; \
+  int16x4_t __s1_658 = __p1_658; \
+  int16x4_t __rev1_658;  __rev1_658 = __builtin_shufflevector(__s1_658, __s1_658, 3, 2, 1, 0); \
+  int16_t __ret_658; \
+  __ret_658 = vqrdmulhh_s16(__s0_658, __noswap_vget_lane_s16(__rev1_658, __p2_658)); \
+  __ret_658; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_172, __p1_172, __p2_172) __extension__ ({ \
-  int32_t __s0_172 = __p0_172; \
-  int32x4_t __s1_172 = __p1_172; \
-  int32_t __ret_172; \
-  __ret_172 = vqrdmulhs_s32(__s0_172, vgetq_lane_s32(__s1_172, __p2_172)); \
-  __ret_172; \
+#define vqrdmulhs_laneq_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
+  int32_t __s0_659 = __p0_659; \
+  int32x4_t __s1_659 = __p1_659; \
+  int32_t __ret_659; \
+  __ret_659 = vqrdmulhs_s32(__s0_659, vgetq_lane_s32(__s1_659, __p2_659)); \
+  __ret_659; \
 })
 #else
-#define vqrdmulhs_laneq_s32(__p0_173, __p1_173, __p2_173) __extension__ ({ \
-  int32_t __s0_173 = __p0_173; \
-  int32x4_t __s1_173 = __p1_173; \
-  int32x4_t __rev1_173;  __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 3, 2, 1, 0); \
-  int32_t __ret_173; \
-  __ret_173 = vqrdmulhs_s32(__s0_173, __noswap_vgetq_lane_s32(__rev1_173, __p2_173)); \
-  __ret_173; \
+#define vqrdmulhs_laneq_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
+  int32_t __s0_660 = __p0_660; \
+  int32x4_t __s1_660 = __p1_660; \
+  int32x4_t __rev1_660;  __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 3, 2, 1, 0); \
+  int32_t __ret_660; \
+  __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vgetq_lane_s32(__rev1_660, __p2_660)); \
+  __ret_660; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_174, __p1_174, __p2_174) __extension__ ({ \
-  int16_t __s0_174 = __p0_174; \
-  int16x8_t __s1_174 = __p1_174; \
-  int16_t __ret_174; \
-  __ret_174 = vqrdmulhh_s16(__s0_174, vgetq_lane_s16(__s1_174, __p2_174)); \
-  __ret_174; \
+#define vqrdmulhh_laneq_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \
+  int16_t __s0_661 = __p0_661; \
+  int16x8_t __s1_661 = __p1_661; \
+  int16_t __ret_661; \
+  __ret_661 = vqrdmulhh_s16(__s0_661, vgetq_lane_s16(__s1_661, __p2_661)); \
+  __ret_661; \
 })
 #else
-#define vqrdmulhh_laneq_s16(__p0_175, __p1_175, __p2_175) __extension__ ({ \
-  int16_t __s0_175 = __p0_175; \
-  int16x8_t __s1_175 = __p1_175; \
-  int16x8_t __rev1_175;  __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_175; \
-  __ret_175 = vqrdmulhh_s16(__s0_175, __noswap_vgetq_lane_s16(__rev1_175, __p2_175)); \
-  __ret_175; \
+#define vqrdmulhh_laneq_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
+  int16_t __s0_662 = __p0_662; \
+  int16x8_t __s1_662 = __p1_662; \
+  int16x8_t __rev1_662;  __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_662; \
+  __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vgetq_lane_s16(__rev1_662, __p2_662)); \
+  __ret_662; \
 })
 #endif
 
@@ -53790,128 +57590,128 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_176, __p1_176, __p2_176) __extension__ ({ \
-  uint16x4_t __s0_176 = __p0_176; \
-  uint32x4_t __s1_176 = __p1_176; \
-  uint16x8_t __ret_176; \
-  __ret_176 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_176), (uint16x4_t)(vqrshrn_n_u32(__s1_176, __p2_176)))); \
-  __ret_176; \
+#define vqrshrn_high_n_u32(__p0_663, __p1_663, __p2_663) __extension__ ({ \
+  uint16x4_t __s0_663 = __p0_663; \
+  uint32x4_t __s1_663 = __p1_663; \
+  uint16x8_t __ret_663; \
+  __ret_663 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_663), (uint16x4_t)(vqrshrn_n_u32(__s1_663, __p2_663)))); \
+  __ret_663; \
 })
 #else
-#define vqrshrn_high_n_u32(__p0_177, __p1_177, __p2_177) __extension__ ({ \
-  uint16x4_t __s0_177 = __p0_177; \
-  uint32x4_t __s1_177 = __p1_177; \
-  uint16x4_t __rev0_177;  __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 3, 2, 1, 0); \
-  uint32x4_t __rev1_177;  __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
-  uint16x8_t __ret_177; \
-  __ret_177 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_177), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_177, __p2_177)))); \
-  __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_177; \
+#define vqrshrn_high_n_u32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
+  uint16x4_t __s0_664 = __p0_664; \
+  uint32x4_t __s1_664 = __p1_664; \
+  uint16x4_t __rev0_664;  __rev0_664 = __builtin_shufflevector(__s0_664, __s0_664, 3, 2, 1, 0); \
+  uint32x4_t __rev1_664;  __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \
+  uint16x8_t __ret_664; \
+  __ret_664 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_664), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_664, __p2_664)))); \
+  __ret_664 = __builtin_shufflevector(__ret_664, __ret_664, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_664; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_178, __p1_178, __p2_178) __extension__ ({ \
-  uint32x2_t __s0_178 = __p0_178; \
-  uint64x2_t __s1_178 = __p1_178; \
-  uint32x4_t __ret_178; \
-  __ret_178 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_178), (uint32x2_t)(vqrshrn_n_u64(__s1_178, __p2_178)))); \
-  __ret_178; \
+#define vqrshrn_high_n_u64(__p0_665, __p1_665, __p2_665) __extension__ ({ \
+  uint32x2_t __s0_665 = __p0_665; \
+  uint64x2_t __s1_665 = __p1_665; \
+  uint32x4_t __ret_665; \
+  __ret_665 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_665), (uint32x2_t)(vqrshrn_n_u64(__s1_665, __p2_665)))); \
+  __ret_665; \
 })
 #else
-#define vqrshrn_high_n_u64(__p0_179, __p1_179, __p2_179) __extension__ ({ \
-  uint32x2_t __s0_179 = __p0_179; \
-  uint64x2_t __s1_179 = __p1_179; \
-  uint32x2_t __rev0_179;  __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 1, 0); \
-  uint64x2_t __rev1_179;  __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \
-  uint32x4_t __ret_179; \
-  __ret_179 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_179), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_179, __p2_179)))); \
-  __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \
-  __ret_179; \
+#define vqrshrn_high_n_u64(__p0_666, __p1_666, __p2_666) __extension__ ({ \
+  uint32x2_t __s0_666 = __p0_666; \
+  uint64x2_t __s1_666 = __p1_666; \
+  uint32x2_t __rev0_666;  __rev0_666 = __builtin_shufflevector(__s0_666, __s0_666, 1, 0); \
+  uint64x2_t __rev1_666;  __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 1, 0); \
+  uint32x4_t __ret_666; \
+  __ret_666 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_666), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_666, __p2_666)))); \
+  __ret_666 = __builtin_shufflevector(__ret_666, __ret_666, 3, 2, 1, 0); \
+  __ret_666; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
-  uint8x8_t __s0_180 = __p0_180; \
-  uint16x8_t __s1_180 = __p1_180; \
-  uint8x16_t __ret_180; \
-  __ret_180 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_180), (uint8x8_t)(vqrshrn_n_u16(__s1_180, __p2_180)))); \
-  __ret_180; \
+#define vqrshrn_high_n_u16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
+  uint8x8_t __s0_667 = __p0_667; \
+  uint16x8_t __s1_667 = __p1_667; \
+  uint8x16_t __ret_667; \
+  __ret_667 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_667), (uint8x8_t)(vqrshrn_n_u16(__s1_667, __p2_667)))); \
+  __ret_667; \
 })
 #else
-#define vqrshrn_high_n_u16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
-  uint8x8_t __s0_181 = __p0_181; \
-  uint16x8_t __s1_181 = __p1_181; \
-  uint8x8_t __rev0_181;  __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_181;  __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_181; \
-  __ret_181 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_181), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_181, __p2_181)))); \
-  __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_181; \
+#define vqrshrn_high_n_u16(__p0_668, __p1_668, __p2_668) __extension__ ({ \
+  uint8x8_t __s0_668 = __p0_668; \
+  uint16x8_t __s1_668 = __p1_668; \
+  uint8x8_t __rev0_668;  __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_668;  __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_668; \
+  __ret_668 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_668), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_668, __p2_668)))); \
+  __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_668; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
-  int16x4_t __s0_182 = __p0_182; \
-  int32x4_t __s1_182 = __p1_182; \
-  int16x8_t __ret_182; \
-  __ret_182 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_182), (int16x4_t)(vqrshrn_n_s32(__s1_182, __p2_182)))); \
-  __ret_182; \
+#define vqrshrn_high_n_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
+  int16x4_t __s0_669 = __p0_669; \
+  int32x4_t __s1_669 = __p1_669; \
+  int16x8_t __ret_669; \
+  __ret_669 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_669), (int16x4_t)(vqrshrn_n_s32(__s1_669, __p2_669)))); \
+  __ret_669; \
 })
 #else
-#define vqrshrn_high_n_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
-  int16x4_t __s0_183 = __p0_183; \
-  int32x4_t __s1_183 = __p1_183; \
-  int16x4_t __rev0_183;  __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
-  int32x4_t __rev1_183;  __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
-  int16x8_t __ret_183; \
-  __ret_183 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_183), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_183, __p2_183)))); \
-  __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_183; \
+#define vqrshrn_high_n_s32(__p0_670, __p1_670, __p2_670) __extension__ ({ \
+  int16x4_t __s0_670 = __p0_670; \
+  int32x4_t __s1_670 = __p1_670; \
+  int16x4_t __rev0_670;  __rev0_670 = __builtin_shufflevector(__s0_670, __s0_670, 3, 2, 1, 0); \
+  int32x4_t __rev1_670;  __rev1_670 = __builtin_shufflevector(__s1_670, __s1_670, 3, 2, 1, 0); \
+  int16x8_t __ret_670; \
+  __ret_670 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_670), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_670, __p2_670)))); \
+  __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_670; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
-  int32x2_t __s0_184 = __p0_184; \
-  int64x2_t __s1_184 = __p1_184; \
-  int32x4_t __ret_184; \
-  __ret_184 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_184), (int32x2_t)(vqrshrn_n_s64(__s1_184, __p2_184)))); \
-  __ret_184; \
+#define vqrshrn_high_n_s64(__p0_671, __p1_671, __p2_671) __extension__ ({ \
+  int32x2_t __s0_671 = __p0_671; \
+  int64x2_t __s1_671 = __p1_671; \
+  int32x4_t __ret_671; \
+  __ret_671 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_671), (int32x2_t)(vqrshrn_n_s64(__s1_671, __p2_671)))); \
+  __ret_671; \
 })
 #else
-#define vqrshrn_high_n_s64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
-  int32x2_t __s0_185 = __p0_185; \
-  int64x2_t __s1_185 = __p1_185; \
-  int32x2_t __rev0_185;  __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
-  int64x2_t __rev1_185;  __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
-  int32x4_t __ret_185; \
-  __ret_185 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_185), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_185, __p2_185)))); \
-  __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
-  __ret_185; \
+#define vqrshrn_high_n_s64(__p0_672, __p1_672, __p2_672) __extension__ ({ \
+  int32x2_t __s0_672 = __p0_672; \
+  int64x2_t __s1_672 = __p1_672; \
+  int32x2_t __rev0_672;  __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 1, 0); \
+  int64x2_t __rev1_672;  __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 1, 0); \
+  int32x4_t __ret_672; \
+  __ret_672 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_672), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_672, __p2_672)))); \
+  __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 3, 2, 1, 0); \
+  __ret_672; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
-  int8x8_t __s0_186 = __p0_186; \
-  int16x8_t __s1_186 = __p1_186; \
-  int8x16_t __ret_186; \
-  __ret_186 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_186), (int8x8_t)(vqrshrn_n_s16(__s1_186, __p2_186)))); \
-  __ret_186; \
+#define vqrshrn_high_n_s16(__p0_673, __p1_673, __p2_673) __extension__ ({ \
+  int8x8_t __s0_673 = __p0_673; \
+  int16x8_t __s1_673 = __p1_673; \
+  int8x16_t __ret_673; \
+  __ret_673 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_673), (int8x8_t)(vqrshrn_n_s16(__s1_673, __p2_673)))); \
+  __ret_673; \
 })
 #else
-#define vqrshrn_high_n_s16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
-  int8x8_t __s0_187 = __p0_187; \
-  int16x8_t __s1_187 = __p1_187; \
-  int8x8_t __rev0_187;  __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_187;  __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_187; \
-  __ret_187 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_187), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_187, __p2_187)))); \
-  __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_187; \
+#define vqrshrn_high_n_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
+  int8x8_t __s0_674 = __p0_674; \
+  int16x8_t __s1_674 = __p1_674; \
+  int8x8_t __rev0_674;  __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_674;  __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_674; \
+  __ret_674 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_674), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_674, __p2_674)))); \
+  __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_674; \
 })
 #endif
 
@@ -53952,65 +57752,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
-  int16x4_t __s0_188 = __p0_188; \
-  int32x4_t __s1_188 = __p1_188; \
-  int16x8_t __ret_188; \
-  __ret_188 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_188), (int16x4_t)(vqrshrun_n_s32(__s1_188, __p2_188)))); \
-  __ret_188; \
+#define vqrshrun_high_n_s32(__p0_675, __p1_675, __p2_675) __extension__ ({ \
+  int16x4_t __s0_675 = __p0_675; \
+  int32x4_t __s1_675 = __p1_675; \
+  int16x8_t __ret_675; \
+  __ret_675 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_675), (int16x4_t)(vqrshrun_n_s32(__s1_675, __p2_675)))); \
+  __ret_675; \
 })
 #else
-#define vqrshrun_high_n_s32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
-  int16x4_t __s0_189 = __p0_189; \
-  int32x4_t __s1_189 = __p1_189; \
-  int16x4_t __rev0_189;  __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
-  int32x4_t __rev1_189;  __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
-  int16x8_t __ret_189; \
-  __ret_189 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_189), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_189, __p2_189)))); \
-  __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_189; \
+#define vqrshrun_high_n_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
+  int16x4_t __s0_676 = __p0_676; \
+  int32x4_t __s1_676 = __p1_676; \
+  int16x4_t __rev0_676;  __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 3, 2, 1, 0); \
+  int32x4_t __rev1_676;  __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 3, 2, 1, 0); \
+  int16x8_t __ret_676; \
+  __ret_676 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_676), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_676, __p2_676)))); \
+  __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_676; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
-  int32x2_t __s0_190 = __p0_190; \
-  int64x2_t __s1_190 = __p1_190; \
-  int32x4_t __ret_190; \
-  __ret_190 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_190), (int32x2_t)(vqrshrun_n_s64(__s1_190, __p2_190)))); \
-  __ret_190; \
+#define vqrshrun_high_n_s64(__p0_677, __p1_677, __p2_677) __extension__ ({ \
+  int32x2_t __s0_677 = __p0_677; \
+  int64x2_t __s1_677 = __p1_677; \
+  int32x4_t __ret_677; \
+  __ret_677 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_677), (int32x2_t)(vqrshrun_n_s64(__s1_677, __p2_677)))); \
+  __ret_677; \
 })
 #else
-#define vqrshrun_high_n_s64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
-  int32x2_t __s0_191 = __p0_191; \
-  int64x2_t __s1_191 = __p1_191; \
-  int32x2_t __rev0_191;  __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
-  int64x2_t __rev1_191;  __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
-  int32x4_t __ret_191; \
-  __ret_191 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_191), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_191, __p2_191)))); \
-  __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
-  __ret_191; \
+#define vqrshrun_high_n_s64(__p0_678, __p1_678, __p2_678) __extension__ ({ \
+  int32x2_t __s0_678 = __p0_678; \
+  int64x2_t __s1_678 = __p1_678; \
+  int32x2_t __rev0_678;  __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 1, 0); \
+  int64x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
+  int32x4_t __ret_678; \
+  __ret_678 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_678), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_678, __p2_678)))); \
+  __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 3, 2, 1, 0); \
+  __ret_678; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
-  int8x8_t __s0_192 = __p0_192; \
-  int16x8_t __s1_192 = __p1_192; \
-  int8x16_t __ret_192; \
-  __ret_192 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_192), (int8x8_t)(vqrshrun_n_s16(__s1_192, __p2_192)))); \
-  __ret_192; \
+#define vqrshrun_high_n_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
+  int8x8_t __s0_679 = __p0_679; \
+  int16x8_t __s1_679 = __p1_679; \
+  int8x16_t __ret_679; \
+  __ret_679 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_679), (int8x8_t)(vqrshrun_n_s16(__s1_679, __p2_679)))); \
+  __ret_679; \
 })
 #else
-#define vqrshrun_high_n_s16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
-  int8x8_t __s0_193 = __p0_193; \
-  int16x8_t __s1_193 = __p1_193; \
-  int8x8_t __rev0_193;  __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_193;  __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_193; \
-  __ret_193 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_193), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_193, __p2_193)))); \
-  __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_193; \
+#define vqrshrun_high_n_s16(__p0_680, __p1_680, __p2_680) __extension__ ({ \
+  int8x8_t __s0_680 = __p0_680; \
+  int16x8_t __s1_680 = __p1_680; \
+  int8x8_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_680;  __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_680; \
+  __ret_680 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_680), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_680, __p2_680)))); \
+  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_680; \
 })
 #endif
 
@@ -54145,128 +57945,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
-  uint16x4_t __s0_194 = __p0_194; \
-  uint32x4_t __s1_194 = __p1_194; \
-  uint16x8_t __ret_194; \
-  __ret_194 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_194), (uint16x4_t)(vqshrn_n_u32(__s1_194, __p2_194)))); \
-  __ret_194; \
+#define vqshrn_high_n_u32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
+  uint16x4_t __s0_681 = __p0_681; \
+  uint32x4_t __s1_681 = __p1_681; \
+  uint16x8_t __ret_681; \
+  __ret_681 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_681), (uint16x4_t)(vqshrn_n_u32(__s1_681, __p2_681)))); \
+  __ret_681; \
 })
 #else
-#define vqshrn_high_n_u32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
-  uint16x4_t __s0_195 = __p0_195; \
-  uint32x4_t __s1_195 = __p1_195; \
-  uint16x4_t __rev0_195;  __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
-  uint32x4_t __rev1_195;  __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
-  uint16x8_t __ret_195; \
-  __ret_195 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_195), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_195, __p2_195)))); \
-  __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_195; \
+#define vqshrn_high_n_u32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
+  uint16x4_t __s0_682 = __p0_682; \
+  uint32x4_t __s1_682 = __p1_682; \
+  uint16x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
+  uint32x4_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 3, 2, 1, 0); \
+  uint16x8_t __ret_682; \
+  __ret_682 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_682), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_682, __p2_682)))); \
+  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_682; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
-  uint32x2_t __s0_196 = __p0_196; \
-  uint64x2_t __s1_196 = __p1_196; \
-  uint32x4_t __ret_196; \
-  __ret_196 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_196), (uint32x2_t)(vqshrn_n_u64(__s1_196, __p2_196)))); \
-  __ret_196; \
+#define vqshrn_high_n_u64(__p0_683, __p1_683, __p2_683) __extension__ ({ \
+  uint32x2_t __s0_683 = __p0_683; \
+  uint64x2_t __s1_683 = __p1_683; \
+  uint32x4_t __ret_683; \
+  __ret_683 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_683), (uint32x2_t)(vqshrn_n_u64(__s1_683, __p2_683)))); \
+  __ret_683; \
 })
 #else
-#define vqshrn_high_n_u64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
-  uint32x2_t __s0_197 = __p0_197; \
-  uint64x2_t __s1_197 = __p1_197; \
-  uint32x2_t __rev0_197;  __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
-  uint64x2_t __rev1_197;  __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
-  uint32x4_t __ret_197; \
-  __ret_197 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_197), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_197, __p2_197)))); \
-  __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
-  __ret_197; \
+#define vqshrn_high_n_u64(__p0_684, __p1_684, __p2_684) __extension__ ({ \
+  uint32x2_t __s0_684 = __p0_684; \
+  uint64x2_t __s1_684 = __p1_684; \
+  uint32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
+  uint64x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
+  uint32x4_t __ret_684; \
+  __ret_684 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_684), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_684, __p2_684)))); \
+  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 3, 2, 1, 0); \
+  __ret_684; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
-  uint8x8_t __s0_198 = __p0_198; \
-  uint16x8_t __s1_198 = __p1_198; \
-  uint8x16_t __ret_198; \
-  __ret_198 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_198), (uint8x8_t)(vqshrn_n_u16(__s1_198, __p2_198)))); \
-  __ret_198; \
+#define vqshrn_high_n_u16(__p0_685, __p1_685, __p2_685) __extension__ ({ \
+  uint8x8_t __s0_685 = __p0_685; \
+  uint16x8_t __s1_685 = __p1_685; \
+  uint8x16_t __ret_685; \
+  __ret_685 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_685), (uint8x8_t)(vqshrn_n_u16(__s1_685, __p2_685)))); \
+  __ret_685; \
 })
 #else
-#define vqshrn_high_n_u16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
-  uint8x8_t __s0_199 = __p0_199; \
-  uint16x8_t __s1_199 = __p1_199; \
-  uint8x8_t __rev0_199;  __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_199;  __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_199; \
-  __ret_199 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_199), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_199, __p2_199)))); \
-  __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_199; \
+#define vqshrn_high_n_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
+  uint8x8_t __s0_686 = __p0_686; \
+  uint16x8_t __s1_686 = __p1_686; \
+  uint8x8_t __rev0_686;  __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_686; \
+  __ret_686 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_686), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_686, __p2_686)))); \
+  __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_686; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
-  int16x4_t __s0_200 = __p0_200; \
-  int32x4_t __s1_200 = __p1_200; \
-  int16x8_t __ret_200; \
-  __ret_200 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_200), (int16x4_t)(vqshrn_n_s32(__s1_200, __p2_200)))); \
-  __ret_200; \
+#define vqshrn_high_n_s32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
+  int16x4_t __s0_687 = __p0_687; \
+  int32x4_t __s1_687 = __p1_687; \
+  int16x8_t __ret_687; \
+  __ret_687 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_687), (int16x4_t)(vqshrn_n_s32(__s1_687, __p2_687)))); \
+  __ret_687; \
 })
 #else
-#define vqshrn_high_n_s32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
-  int16x4_t __s0_201 = __p0_201; \
-  int32x4_t __s1_201 = __p1_201; \
-  int16x4_t __rev0_201;  __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
-  int32x4_t __rev1_201;  __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
-  int16x8_t __ret_201; \
-  __ret_201 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_201), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_201, __p2_201)))); \
-  __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_201; \
+#define vqshrn_high_n_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
+  int16x4_t __s0_688 = __p0_688; \
+  int32x4_t __s1_688 = __p1_688; \
+  int16x4_t __rev0_688;  __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 3, 2, 1, 0); \
+  int32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
+  int16x8_t __ret_688; \
+  __ret_688 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_688), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_688, __p2_688)))); \
+  __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_688; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
-  int32x2_t __s0_202 = __p0_202; \
-  int64x2_t __s1_202 = __p1_202; \
-  int32x4_t __ret_202; \
-  __ret_202 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_202), (int32x2_t)(vqshrn_n_s64(__s1_202, __p2_202)))); \
-  __ret_202; \
+#define vqshrn_high_n_s64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
+  int32x2_t __s0_689 = __p0_689; \
+  int64x2_t __s1_689 = __p1_689; \
+  int32x4_t __ret_689; \
+  __ret_689 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_689), (int32x2_t)(vqshrn_n_s64(__s1_689, __p2_689)))); \
+  __ret_689; \
 })
 #else
-#define vqshrn_high_n_s64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
-  int32x2_t __s0_203 = __p0_203; \
-  int64x2_t __s1_203 = __p1_203; \
-  int32x2_t __rev0_203;  __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
-  int64x2_t __rev1_203;  __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
-  int32x4_t __ret_203; \
-  __ret_203 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_203), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_203, __p2_203)))); \
-  __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
-  __ret_203; \
+#define vqshrn_high_n_s64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
+  int32x2_t __s0_690 = __p0_690; \
+  int64x2_t __s1_690 = __p1_690; \
+  int32x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
+  int64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
+  int32x4_t __ret_690; \
+  __ret_690 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_690), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_690, __p2_690)))); \
+  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 3, 2, 1, 0); \
+  __ret_690; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
-  int8x8_t __s0_204 = __p0_204; \
-  int16x8_t __s1_204 = __p1_204; \
-  int8x16_t __ret_204; \
-  __ret_204 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_204), (int8x8_t)(vqshrn_n_s16(__s1_204, __p2_204)))); \
-  __ret_204; \
+#define vqshrn_high_n_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \
+  int8x8_t __s0_691 = __p0_691; \
+  int16x8_t __s1_691 = __p1_691; \
+  int8x16_t __ret_691; \
+  __ret_691 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_691), (int8x8_t)(vqshrn_n_s16(__s1_691, __p2_691)))); \
+  __ret_691; \
 })
 #else
-#define vqshrn_high_n_s16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
-  int8x8_t __s0_205 = __p0_205; \
-  int16x8_t __s1_205 = __p1_205; \
-  int8x8_t __rev0_205;  __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_205;  __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_205; \
-  __ret_205 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_205), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_205, __p2_205)))); \
-  __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_205; \
+#define vqshrn_high_n_s16(__p0_692, __p1_692, __p2_692) __extension__ ({ \
+  int8x8_t __s0_692 = __p0_692; \
+  int16x8_t __s1_692 = __p1_692; \
+  int8x8_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_692; \
+  __ret_692 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_692), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_692, __p2_692)))); \
+  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_692; \
 })
 #endif
 
@@ -54307,65 +58107,65 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
-  int16x4_t __s0_206 = __p0_206; \
-  int32x4_t __s1_206 = __p1_206; \
-  int16x8_t __ret_206; \
-  __ret_206 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_206), (int16x4_t)(vqshrun_n_s32(__s1_206, __p2_206)))); \
-  __ret_206; \
+#define vqshrun_high_n_s32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
+  int16x4_t __s0_693 = __p0_693; \
+  int32x4_t __s1_693 = __p1_693; \
+  int16x8_t __ret_693; \
+  __ret_693 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_693), (int16x4_t)(vqshrun_n_s32(__s1_693, __p2_693)))); \
+  __ret_693; \
 })
 #else
-#define vqshrun_high_n_s32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
-  int16x4_t __s0_207 = __p0_207; \
-  int32x4_t __s1_207 = __p1_207; \
-  int16x4_t __rev0_207;  __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
-  int32x4_t __rev1_207;  __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
-  int16x8_t __ret_207; \
-  __ret_207 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_207), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_207, __p2_207)))); \
-  __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_207; \
+#define vqshrun_high_n_s32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
+  int16x4_t __s0_694 = __p0_694; \
+  int32x4_t __s1_694 = __p1_694; \
+  int16x4_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 3, 2, 1, 0); \
+  int32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
+  int16x8_t __ret_694; \
+  __ret_694 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_694), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_694, __p2_694)))); \
+  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_694; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
-  int32x2_t __s0_208 = __p0_208; \
-  int64x2_t __s1_208 = __p1_208; \
-  int32x4_t __ret_208; \
-  __ret_208 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_208), (int32x2_t)(vqshrun_n_s64(__s1_208, __p2_208)))); \
-  __ret_208; \
+#define vqshrun_high_n_s64(__p0_695, __p1_695, __p2_695) __extension__ ({ \
+  int32x2_t __s0_695 = __p0_695; \
+  int64x2_t __s1_695 = __p1_695; \
+  int32x4_t __ret_695; \
+  __ret_695 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_695), (int32x2_t)(vqshrun_n_s64(__s1_695, __p2_695)))); \
+  __ret_695; \
 })
 #else
-#define vqshrun_high_n_s64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
-  int32x2_t __s0_209 = __p0_209; \
-  int64x2_t __s1_209 = __p1_209; \
-  int32x2_t __rev0_209;  __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
-  int64x2_t __rev1_209;  __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
-  int32x4_t __ret_209; \
-  __ret_209 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_209), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_209, __p2_209)))); \
-  __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
-  __ret_209; \
+#define vqshrun_high_n_s64(__p0_696, __p1_696, __p2_696) __extension__ ({ \
+  int32x2_t __s0_696 = __p0_696; \
+  int64x2_t __s1_696 = __p1_696; \
+  int32x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
+  int64x2_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 1, 0); \
+  int32x4_t __ret_696; \
+  __ret_696 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_696), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_696, __p2_696)))); \
+  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 3, 2, 1, 0); \
+  __ret_696; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
-  int8x8_t __s0_210 = __p0_210; \
-  int16x8_t __s1_210 = __p1_210; \
-  int8x16_t __ret_210; \
-  __ret_210 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_210), (int8x8_t)(vqshrun_n_s16(__s1_210, __p2_210)))); \
-  __ret_210; \
+#define vqshrun_high_n_s16(__p0_697, __p1_697, __p2_697) __extension__ ({ \
+  int8x8_t __s0_697 = __p0_697; \
+  int16x8_t __s1_697 = __p1_697; \
+  int8x16_t __ret_697; \
+  __ret_697 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_697), (int8x8_t)(vqshrun_n_s16(__s1_697, __p2_697)))); \
+  __ret_697; \
 })
 #else
-#define vqshrun_high_n_s16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
-  int8x8_t __s0_211 = __p0_211; \
-  int16x8_t __s1_211 = __p1_211; \
-  int8x8_t __rev0_211;  __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_211;  __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_211; \
-  __ret_211 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_211), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_211, __p2_211)))); \
-  __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_211; \
+#define vqshrun_high_n_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \
+  int8x8_t __s0_698 = __p0_698; \
+  int16x8_t __s1_698 = __p1_698; \
+  int8x8_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_698; \
+  __ret_698 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_698), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_698, __p2_698)))); \
+  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_698; \
 })
 #endif
 
@@ -55675,128 +59475,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
-  uint16x4_t __s0_212 = __p0_212; \
-  uint32x4_t __s1_212 = __p1_212; \
-  uint16x8_t __ret_212; \
-  __ret_212 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_212), (uint16x4_t)(vrshrn_n_u32(__s1_212, __p2_212)))); \
-  __ret_212; \
+#define vrshrn_high_n_u32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
+  uint16x4_t __s0_699 = __p0_699; \
+  uint32x4_t __s1_699 = __p1_699; \
+  uint16x8_t __ret_699; \
+  __ret_699 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_699), (uint16x4_t)(vrshrn_n_u32(__s1_699, __p2_699)))); \
+  __ret_699; \
 })
 #else
-#define vrshrn_high_n_u32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
-  uint16x4_t __s0_213 = __p0_213; \
-  uint32x4_t __s1_213 = __p1_213; \
-  uint16x4_t __rev0_213;  __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
-  uint32x4_t __rev1_213;  __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
-  uint16x8_t __ret_213; \
-  __ret_213 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_213), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_213, __p2_213)))); \
-  __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_213; \
+#define vrshrn_high_n_u32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
+  uint16x4_t __s0_700 = __p0_700; \
+  uint32x4_t __s1_700 = __p1_700; \
+  uint16x4_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 3, 2, 1, 0); \
+  uint32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
+  uint16x8_t __ret_700; \
+  __ret_700 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_700), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_700, __p2_700)))); \
+  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_700; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
-  uint32x2_t __s0_214 = __p0_214; \
-  uint64x2_t __s1_214 = __p1_214; \
-  uint32x4_t __ret_214; \
-  __ret_214 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_214), (uint32x2_t)(vrshrn_n_u64(__s1_214, __p2_214)))); \
-  __ret_214; \
+#define vrshrn_high_n_u64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
+  uint32x2_t __s0_701 = __p0_701; \
+  uint64x2_t __s1_701 = __p1_701; \
+  uint32x4_t __ret_701; \
+  __ret_701 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_701), (uint32x2_t)(vrshrn_n_u64(__s1_701, __p2_701)))); \
+  __ret_701; \
 })
 #else
-#define vrshrn_high_n_u64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
-  uint32x2_t __s0_215 = __p0_215; \
-  uint64x2_t __s1_215 = __p1_215; \
-  uint32x2_t __rev0_215;  __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
-  uint64x2_t __rev1_215;  __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
-  uint32x4_t __ret_215; \
-  __ret_215 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_215), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_215, __p2_215)))); \
-  __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
-  __ret_215; \
+#define vrshrn_high_n_u64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
+  uint32x2_t __s0_702 = __p0_702; \
+  uint64x2_t __s1_702 = __p1_702; \
+  uint32x2_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 1, 0); \
+  uint64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
+  uint32x4_t __ret_702; \
+  __ret_702 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_702), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_702, __p2_702)))); \
+  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
+  __ret_702; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
-  uint8x8_t __s0_216 = __p0_216; \
-  uint16x8_t __s1_216 = __p1_216; \
-  uint8x16_t __ret_216; \
-  __ret_216 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_216), (uint8x8_t)(vrshrn_n_u16(__s1_216, __p2_216)))); \
-  __ret_216; \
+#define vrshrn_high_n_u16(__p0_703, __p1_703, __p2_703) __extension__ ({ \
+  uint8x8_t __s0_703 = __p0_703; \
+  uint16x8_t __s1_703 = __p1_703; \
+  uint8x16_t __ret_703; \
+  __ret_703 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_703), (uint8x8_t)(vrshrn_n_u16(__s1_703, __p2_703)))); \
+  __ret_703; \
 })
 #else
-#define vrshrn_high_n_u16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
-  uint8x8_t __s0_217 = __p0_217; \
-  uint16x8_t __s1_217 = __p1_217; \
-  uint8x8_t __rev0_217;  __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_217;  __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_217; \
-  __ret_217 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_217), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_217, __p2_217)))); \
-  __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_217; \
+#define vrshrn_high_n_u16(__p0_704, __p1_704, __p2_704) __extension__ ({ \
+  uint8x8_t __s0_704 = __p0_704; \
+  uint16x8_t __s1_704 = __p1_704; \
+  uint8x8_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_704; \
+  __ret_704 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_704), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_704, __p2_704)))); \
+  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_704; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
-  int16x4_t __s0_218 = __p0_218; \
-  int32x4_t __s1_218 = __p1_218; \
-  int16x8_t __ret_218; \
-  __ret_218 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_218), (int16x4_t)(vrshrn_n_s32(__s1_218, __p2_218)))); \
-  __ret_218; \
+#define vrshrn_high_n_s32(__p0_705, __p1_705, __p2_705) __extension__ ({ \
+  int16x4_t __s0_705 = __p0_705; \
+  int32x4_t __s1_705 = __p1_705; \
+  int16x8_t __ret_705; \
+  __ret_705 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_705), (int16x4_t)(vrshrn_n_s32(__s1_705, __p2_705)))); \
+  __ret_705; \
 })
 #else
-#define vrshrn_high_n_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
-  int16x4_t __s0_219 = __p0_219; \
-  int32x4_t __s1_219 = __p1_219; \
-  int16x4_t __rev0_219;  __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 3, 2, 1, 0); \
-  int32x4_t __rev1_219;  __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 3, 2, 1, 0); \
-  int16x8_t __ret_219; \
-  __ret_219 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_219), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_219, __p2_219)))); \
-  __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_219; \
+#define vrshrn_high_n_s32(__p0_706, __p1_706, __p2_706) __extension__ ({ \
+  int16x4_t __s0_706 = __p0_706; \
+  int32x4_t __s1_706 = __p1_706; \
+  int16x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
+  int32x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
+  int16x8_t __ret_706; \
+  __ret_706 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_706), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_706, __p2_706)))); \
+  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_706; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_220, __p1_220, __p2_220) __extension__ ({ \
-  int32x2_t __s0_220 = __p0_220; \
-  int64x2_t __s1_220 = __p1_220; \
-  int32x4_t __ret_220; \
-  __ret_220 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_220), (int32x2_t)(vrshrn_n_s64(__s1_220, __p2_220)))); \
-  __ret_220; \
+#define vrshrn_high_n_s64(__p0_707, __p1_707, __p2_707) __extension__ ({ \
+  int32x2_t __s0_707 = __p0_707; \
+  int64x2_t __s1_707 = __p1_707; \
+  int32x4_t __ret_707; \
+  __ret_707 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_707), (int32x2_t)(vrshrn_n_s64(__s1_707, __p2_707)))); \
+  __ret_707; \
 })
 #else
-#define vrshrn_high_n_s64(__p0_221, __p1_221, __p2_221) __extension__ ({ \
-  int32x2_t __s0_221 = __p0_221; \
-  int64x2_t __s1_221 = __p1_221; \
-  int32x2_t __rev0_221;  __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 1, 0); \
-  int64x2_t __rev1_221;  __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 1, 0); \
-  int32x4_t __ret_221; \
-  __ret_221 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_221), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_221, __p2_221)))); \
-  __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
-  __ret_221; \
+#define vrshrn_high_n_s64(__p0_708, __p1_708, __p2_708) __extension__ ({ \
+  int32x2_t __s0_708 = __p0_708; \
+  int64x2_t __s1_708 = __p1_708; \
+  int32x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
+  int64x2_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 1, 0); \
+  int32x4_t __ret_708; \
+  __ret_708 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_708), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_708, __p2_708)))); \
+  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \
+  __ret_708; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
-  int8x8_t __s0_222 = __p0_222; \
-  int16x8_t __s1_222 = __p1_222; \
-  int8x16_t __ret_222; \
-  __ret_222 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_222), (int8x8_t)(vrshrn_n_s16(__s1_222, __p2_222)))); \
-  __ret_222; \
+#define vrshrn_high_n_s16(__p0_709, __p1_709, __p2_709) __extension__ ({ \
+  int8x8_t __s0_709 = __p0_709; \
+  int16x8_t __s1_709 = __p1_709; \
+  int8x16_t __ret_709; \
+  __ret_709 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_709), (int8x8_t)(vrshrn_n_s16(__s1_709, __p2_709)))); \
+  __ret_709; \
 })
 #else
-#define vrshrn_high_n_s16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
-  int8x8_t __s0_223 = __p0_223; \
-  int16x8_t __s1_223 = __p1_223; \
-  int8x8_t __rev0_223;  __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_223;  __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_223; \
-  __ret_223 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_223), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_223, __p2_223)))); \
-  __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_223; \
+#define vrshrn_high_n_s16(__p0_710, __p1_710, __p2_710) __extension__ ({ \
+  int8x8_t __s0_710 = __p0_710; \
+  int16x8_t __s1_710 = __p1_710; \
+  int8x8_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_710; \
+  __ret_710 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_710), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_710, __p2_710)))); \
+  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_710; \
 })
 #endif
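
The vrshrn_high_n_* hunks above only renumber the macro-local suffixes; the behavior is unchanged. For reference, a minimal usage sketch of the underlying intrinsic (assuming an AArch64 target with <arm_neon.h>; the function name is illustrative):

    #include <arm_neon.h>

    /* Round-shift each 32-bit lane of hi right by 8, narrow to 16 bits,
     * and append the results to the existing low half lo. */
    int16x8_t narrow_both_halves(int16x4_t lo, int32x4_t hi) {
      return vrshrn_high_n_s32(lo, hi, 8);
    }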
 
@@ -56076,110 +59876,110 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_224, __p1_224) __extension__ ({ \
-  uint8x16_t __s0_224 = __p0_224; \
-  uint16x8_t __ret_224; \
-  __ret_224 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_224), __p1_224)); \
-  __ret_224; \
+#define vshll_high_n_u8(__p0_711, __p1_711) __extension__ ({ \
+  uint8x16_t __s0_711 = __p0_711; \
+  uint16x8_t __ret_711; \
+  __ret_711 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_711), __p1_711)); \
+  __ret_711; \
 })
 #else
-#define vshll_high_n_u8(__p0_225, __p1_225) __extension__ ({ \
-  uint8x16_t __s0_225 = __p0_225; \
-  uint8x16_t __rev0_225;  __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_225; \
-  __ret_225 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_225), __p1_225)); \
-  __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_225; \
+#define vshll_high_n_u8(__p0_712, __p1_712) __extension__ ({ \
+  uint8x16_t __s0_712 = __p0_712; \
+  uint8x16_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __ret_712; \
+  __ret_712 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_712), __p1_712)); \
+  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_712; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_226, __p1_226) __extension__ ({ \
-  uint32x4_t __s0_226 = __p0_226; \
-  uint64x2_t __ret_226; \
-  __ret_226 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_226), __p1_226)); \
-  __ret_226; \
+#define vshll_high_n_u32(__p0_713, __p1_713) __extension__ ({ \
+  uint32x4_t __s0_713 = __p0_713; \
+  uint64x2_t __ret_713; \
+  __ret_713 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_713), __p1_713)); \
+  __ret_713; \
 })
 #else
-#define vshll_high_n_u32(__p0_227, __p1_227) __extension__ ({ \
-  uint32x4_t __s0_227 = __p0_227; \
-  uint32x4_t __rev0_227;  __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 3, 2, 1, 0); \
-  uint64x2_t __ret_227; \
-  __ret_227 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_227), __p1_227)); \
-  __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \
-  __ret_227; \
+#define vshll_high_n_u32(__p0_714, __p1_714) __extension__ ({ \
+  uint32x4_t __s0_714 = __p0_714; \
+  uint32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
+  uint64x2_t __ret_714; \
+  __ret_714 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_714), __p1_714)); \
+  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 1, 0); \
+  __ret_714; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_228, __p1_228) __extension__ ({ \
-  uint16x8_t __s0_228 = __p0_228; \
-  uint32x4_t __ret_228; \
-  __ret_228 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_228), __p1_228)); \
-  __ret_228; \
+#define vshll_high_n_u16(__p0_715, __p1_715) __extension__ ({ \
+  uint16x8_t __s0_715 = __p0_715; \
+  uint32x4_t __ret_715; \
+  __ret_715 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_715), __p1_715)); \
+  __ret_715; \
 })
 #else
-#define vshll_high_n_u16(__p0_229, __p1_229) __extension__ ({ \
-  uint16x8_t __s0_229 = __p0_229; \
-  uint16x8_t __rev0_229;  __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_229; \
-  __ret_229 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_229), __p1_229)); \
-  __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \
-  __ret_229; \
+#define vshll_high_n_u16(__p0_716, __p1_716) __extension__ ({ \
+  uint16x8_t __s0_716 = __p0_716; \
+  uint16x8_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint32x4_t __ret_716; \
+  __ret_716 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_716), __p1_716)); \
+  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 3, 2, 1, 0); \
+  __ret_716; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_230, __p1_230) __extension__ ({ \
-  int8x16_t __s0_230 = __p0_230; \
-  int16x8_t __ret_230; \
-  __ret_230 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_230), __p1_230)); \
-  __ret_230; \
+#define vshll_high_n_s8(__p0_717, __p1_717) __extension__ ({ \
+  int8x16_t __s0_717 = __p0_717; \
+  int16x8_t __ret_717; \
+  __ret_717 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_717), __p1_717)); \
+  __ret_717; \
 })
 #else
-#define vshll_high_n_s8(__p0_231, __p1_231) __extension__ ({ \
-  int8x16_t __s0_231 = __p0_231; \
-  int8x16_t __rev0_231;  __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_231; \
-  __ret_231 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_231), __p1_231)); \
-  __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_231; \
+#define vshll_high_n_s8(__p0_718, __p1_718) __extension__ ({ \
+  int8x16_t __s0_718 = __p0_718; \
+  int8x16_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __ret_718; \
+  __ret_718 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_718), __p1_718)); \
+  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_718; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_232, __p1_232) __extension__ ({ \
-  int32x4_t __s0_232 = __p0_232; \
-  int64x2_t __ret_232; \
-  __ret_232 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_232), __p1_232)); \
-  __ret_232; \
+#define vshll_high_n_s32(__p0_719, __p1_719) __extension__ ({ \
+  int32x4_t __s0_719 = __p0_719; \
+  int64x2_t __ret_719; \
+  __ret_719 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_719), __p1_719)); \
+  __ret_719; \
 })
 #else
-#define vshll_high_n_s32(__p0_233, __p1_233) __extension__ ({ \
-  int32x4_t __s0_233 = __p0_233; \
-  int32x4_t __rev0_233;  __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \
-  int64x2_t __ret_233; \
-  __ret_233 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_233), __p1_233)); \
-  __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 1, 0); \
-  __ret_233; \
+#define vshll_high_n_s32(__p0_720, __p1_720) __extension__ ({ \
+  int32x4_t __s0_720 = __p0_720; \
+  int32x4_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 3, 2, 1, 0); \
+  int64x2_t __ret_720; \
+  __ret_720 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_720), __p1_720)); \
+  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
+  __ret_720; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_234, __p1_234) __extension__ ({ \
-  int16x8_t __s0_234 = __p0_234; \
-  int32x4_t __ret_234; \
-  __ret_234 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_234), __p1_234)); \
-  __ret_234; \
+#define vshll_high_n_s16(__p0_721, __p1_721) __extension__ ({ \
+  int16x8_t __s0_721 = __p0_721; \
+  int32x4_t __ret_721; \
+  __ret_721 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_721), __p1_721)); \
+  __ret_721; \
 })
 #else
-#define vshll_high_n_s16(__p0_235, __p1_235) __extension__ ({ \
-  int16x8_t __s0_235 = __p0_235; \
-  int16x8_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_235; \
-  __ret_235 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_235), __p1_235)); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 3, 2, 1, 0); \
-  __ret_235; \
+#define vshll_high_n_s16(__p0_722, __p1_722) __extension__ ({ \
+  int16x8_t __s0_722 = __p0_722; \
+  int16x8_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_722; \
+  __ret_722 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_722), __p1_722)); \
+  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
+  __ret_722; \
 })
 #endif
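
vshll_high_n_* widens the upper half of the source vector while shifting left; a sketch of the u8 form (illustrative name, AArch64 target assumed):

    #include <arm_neon.h>

    /* Widen the top eight u8 lanes of v to u16 and shift each left by 4. */
    uint16x8_t widen_high_by_4(uint8x16_t v) {
      return vshll_high_n_u8(v, 4);
    }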
 
@@ -56196,128 +59996,128 @@
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_236, __p1_236, __p2_236) __extension__ ({ \
-  uint16x4_t __s0_236 = __p0_236; \
-  uint32x4_t __s1_236 = __p1_236; \
-  uint16x8_t __ret_236; \
-  __ret_236 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_236), (uint16x4_t)(vshrn_n_u32(__s1_236, __p2_236)))); \
-  __ret_236; \
+#define vshrn_high_n_u32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
+  uint16x4_t __s0_723 = __p0_723; \
+  uint32x4_t __s1_723 = __p1_723; \
+  uint16x8_t __ret_723; \
+  __ret_723 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_723), (uint16x4_t)(vshrn_n_u32(__s1_723, __p2_723)))); \
+  __ret_723; \
 })
 #else
-#define vshrn_high_n_u32(__p0_237, __p1_237, __p2_237) __extension__ ({ \
-  uint16x4_t __s0_237 = __p0_237; \
-  uint32x4_t __s1_237 = __p1_237; \
-  uint16x4_t __rev0_237;  __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \
-  uint32x4_t __rev1_237;  __rev1_237 = __builtin_shufflevector(__s1_237, __s1_237, 3, 2, 1, 0); \
-  uint16x8_t __ret_237; \
-  __ret_237 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_237), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_237, __p2_237)))); \
-  __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_237; \
+#define vshrn_high_n_u32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
+  uint16x4_t __s0_724 = __p0_724; \
+  uint32x4_t __s1_724 = __p1_724; \
+  uint16x4_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \
+  uint32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
+  uint16x8_t __ret_724; \
+  __ret_724 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_724), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_724, __p2_724)))); \
+  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_724; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_238, __p1_238, __p2_238) __extension__ ({ \
-  uint32x2_t __s0_238 = __p0_238; \
-  uint64x2_t __s1_238 = __p1_238; \
-  uint32x4_t __ret_238; \
-  __ret_238 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_238), (uint32x2_t)(vshrn_n_u64(__s1_238, __p2_238)))); \
-  __ret_238; \
+#define vshrn_high_n_u64(__p0_725, __p1_725, __p2_725) __extension__ ({ \
+  uint32x2_t __s0_725 = __p0_725; \
+  uint64x2_t __s1_725 = __p1_725; \
+  uint32x4_t __ret_725; \
+  __ret_725 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_725), (uint32x2_t)(vshrn_n_u64(__s1_725, __p2_725)))); \
+  __ret_725; \
 })
 #else
-#define vshrn_high_n_u64(__p0_239, __p1_239, __p2_239) __extension__ ({ \
-  uint32x2_t __s0_239 = __p0_239; \
-  uint64x2_t __s1_239 = __p1_239; \
-  uint32x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  uint64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  uint32x4_t __ret_239; \
-  __ret_239 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_239), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_239, __p2_239)))); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \
-  __ret_239; \
+#define vshrn_high_n_u64(__p0_726, __p1_726, __p2_726) __extension__ ({ \
+  uint32x2_t __s0_726 = __p0_726; \
+  uint64x2_t __s1_726 = __p1_726; \
+  uint32x2_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 1, 0); \
+  uint64x2_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 1, 0); \
+  uint32x4_t __ret_726; \
+  __ret_726 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_726), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_726, __p2_726)))); \
+  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
+  __ret_726; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_240, __p1_240, __p2_240) __extension__ ({ \
-  uint8x8_t __s0_240 = __p0_240; \
-  uint16x8_t __s1_240 = __p1_240; \
-  uint8x16_t __ret_240; \
-  __ret_240 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_240), (uint8x8_t)(vshrn_n_u16(__s1_240, __p2_240)))); \
-  __ret_240; \
+#define vshrn_high_n_u16(__p0_727, __p1_727, __p2_727) __extension__ ({ \
+  uint8x8_t __s0_727 = __p0_727; \
+  uint16x8_t __s1_727 = __p1_727; \
+  uint8x16_t __ret_727; \
+  __ret_727 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_727), (uint8x8_t)(vshrn_n_u16(__s1_727, __p2_727)))); \
+  __ret_727; \
 })
 #else
-#define vshrn_high_n_u16(__p0_241, __p1_241, __p2_241) __extension__ ({ \
-  uint8x8_t __s0_241 = __p0_241; \
-  uint16x8_t __s1_241 = __p1_241; \
-  uint8x8_t __rev0_241;  __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_241;  __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_241; \
-  __ret_241 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_241), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_241, __p2_241)))); \
-  __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_241; \
+#define vshrn_high_n_u16(__p0_728, __p1_728, __p2_728) __extension__ ({ \
+  uint8x8_t __s0_728 = __p0_728; \
+  uint16x8_t __s1_728 = __p1_728; \
+  uint8x8_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint16x8_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __ret_728; \
+  __ret_728 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_728), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_728, __p2_728)))); \
+  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_728; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \
-  int16x4_t __s0_242 = __p0_242; \
-  int32x4_t __s1_242 = __p1_242; \
-  int16x8_t __ret_242; \
-  __ret_242 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_242), (int16x4_t)(vshrn_n_s32(__s1_242, __p2_242)))); \
-  __ret_242; \
+#define vshrn_high_n_s32(__p0_729, __p1_729, __p2_729) __extension__ ({ \
+  int16x4_t __s0_729 = __p0_729; \
+  int32x4_t __s1_729 = __p1_729; \
+  int16x8_t __ret_729; \
+  __ret_729 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_729), (int16x4_t)(vshrn_n_s32(__s1_729, __p2_729)))); \
+  __ret_729; \
 })
 #else
-#define vshrn_high_n_s32(__p0_243, __p1_243, __p2_243) __extension__ ({ \
-  int16x4_t __s0_243 = __p0_243; \
-  int32x4_t __s1_243 = __p1_243; \
-  int16x4_t __rev0_243;  __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
-  int32x4_t __rev1_243;  __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 3, 2, 1, 0); \
-  int16x8_t __ret_243; \
-  __ret_243 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_243), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_243, __p2_243)))); \
-  __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_243; \
+#define vshrn_high_n_s32(__p0_730, __p1_730, __p2_730) __extension__ ({ \
+  int16x4_t __s0_730 = __p0_730; \
+  int32x4_t __s1_730 = __p1_730; \
+  int16x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
+  int32x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
+  int16x8_t __ret_730; \
+  __ret_730 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_730), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_730, __p2_730)))); \
+  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_730; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_244, __p1_244, __p2_244) __extension__ ({ \
-  int32x2_t __s0_244 = __p0_244; \
-  int64x2_t __s1_244 = __p1_244; \
-  int32x4_t __ret_244; \
-  __ret_244 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_244), (int32x2_t)(vshrn_n_s64(__s1_244, __p2_244)))); \
-  __ret_244; \
+#define vshrn_high_n_s64(__p0_731, __p1_731, __p2_731) __extension__ ({ \
+  int32x2_t __s0_731 = __p0_731; \
+  int64x2_t __s1_731 = __p1_731; \
+  int32x4_t __ret_731; \
+  __ret_731 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_731), (int32x2_t)(vshrn_n_s64(__s1_731, __p2_731)))); \
+  __ret_731; \
 })
 #else
-#define vshrn_high_n_s64(__p0_245, __p1_245, __p2_245) __extension__ ({ \
-  int32x2_t __s0_245 = __p0_245; \
-  int64x2_t __s1_245 = __p1_245; \
-  int32x2_t __rev0_245;  __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \
-  int64x2_t __rev1_245;  __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 1, 0); \
-  int32x4_t __ret_245; \
-  __ret_245 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_245), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_245, __p2_245)))); \
-  __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
-  __ret_245; \
+#define vshrn_high_n_s64(__p0_732, __p1_732, __p2_732) __extension__ ({ \
+  int32x2_t __s0_732 = __p0_732; \
+  int64x2_t __s1_732 = __p1_732; \
+  int32x2_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \
+  int64x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
+  int32x4_t __ret_732; \
+  __ret_732 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_732), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_732, __p2_732)))); \
+  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 3, 2, 1, 0); \
+  __ret_732; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
-  int8x8_t __s0_246 = __p0_246; \
-  int16x8_t __s1_246 = __p1_246; \
-  int8x16_t __ret_246; \
-  __ret_246 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_246), (int8x8_t)(vshrn_n_s16(__s1_246, __p2_246)))); \
-  __ret_246; \
+#define vshrn_high_n_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
+  int8x8_t __s0_733 = __p0_733; \
+  int16x8_t __s1_733 = __p1_733; \
+  int8x16_t __ret_733; \
+  __ret_733 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_733), (int8x8_t)(vshrn_n_s16(__s1_733, __p2_733)))); \
+  __ret_733; \
 })
 #else
-#define vshrn_high_n_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
-  int8x8_t __s0_247 = __p0_247; \
-  int16x8_t __s1_247 = __p1_247; \
-  int8x8_t __rev0_247;  __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_247;  __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_247; \
-  __ret_247 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_247), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_247, __p2_247)))); \
-  __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_247; \
+#define vshrn_high_n_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
+  int8x8_t __s0_734 = __p0_734; \
+  int16x8_t __s1_734 = __p1_734; \
+  int8x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __ret_734; \
+  __ret_734 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_734), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_734, __p2_734)))); \
+  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_734; \
 })
 #endif
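
vshrn_high_n_* is the truncating (non-rounding) counterpart of vrshrn_high_n_* above; on big-endian targets each macro reverses lanes, calls the __noswap_ helpers, and reverses back, as in every pair in this header. A minimal sketch (illustrative name):

    #include <arm_neon.h>

    /* Shift each u32 lane of hi right by 16, truncate to u16,
     * and place the results in the high half above lo. */
    uint16x8_t pack_high_words(uint16x4_t lo, uint32x4_t hi) {
      return vshrn_high_n_u32(lo, hi, 16);
    }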
 
@@ -57753,6 +61553,58 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vsudotq_laneq_s32(__p0_735, __p1_735, __p2_735, __p3_735) __extension__ ({ \
+  int32x4_t __s0_735 = __p0_735; \
+  int8x16_t __s1_735 = __p1_735; \
+  uint8x16_t __s2_735 = __p2_735; \
+  int32x4_t __ret_735; \
+uint8x16_t __reint_735 = __s2_735; \
+  __ret_735 = vusdotq_s32(__s0_735, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_735, __p3_735)), __s1_735); \
+  __ret_735; \
+})
+#else
+#define vsudotq_laneq_s32(__p0_736, __p1_736, __p2_736, __p3_736) __extension__ ({ \
+  int32x4_t __s0_736 = __p0_736; \
+  int8x16_t __s1_736 = __p1_736; \
+  uint8x16_t __s2_736 = __p2_736; \
+  int32x4_t __rev0_736;  __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 3, 2, 1, 0); \
+  int8x16_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_736;  __rev2_736 = __builtin_shufflevector(__s2_736, __s2_736, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_736; \
+uint8x16_t __reint_736 = __rev2_736; \
+  __ret_736 = __noswap_vusdotq_s32(__rev0_736, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_736, __p3_736)), __rev1_736); \
+  __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \
+  __ret_736; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsudot_laneq_s32(__p0_737, __p1_737, __p2_737, __p3_737) __extension__ ({ \
+  int32x2_t __s0_737 = __p0_737; \
+  int8x8_t __s1_737 = __p1_737; \
+  uint8x16_t __s2_737 = __p2_737; \
+  int32x2_t __ret_737; \
+uint8x16_t __reint_737 = __s2_737; \
+  __ret_737 = vusdot_s32(__s0_737, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_737, __p3_737)), __s1_737); \
+  __ret_737; \
+})
+#else
+#define vsudot_laneq_s32(__p0_738, __p1_738, __p2_738, __p3_738) __extension__ ({ \
+  int32x2_t __s0_738 = __p0_738; \
+  int8x8_t __s1_738 = __p1_738; \
+  uint8x16_t __s2_738 = __p2_738; \
+  int32x2_t __rev0_738;  __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 1, 0); \
+  int8x8_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x16_t __rev2_738;  __rev2_738 = __builtin_shufflevector(__s2_738, __s2_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_738; \
+uint8x16_t __reint_738 = __rev2_738; \
+  __ret_738 = __noswap_vusdot_s32(__rev0_738, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_738, __p3_738)), __rev1_738); \
+  __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 1, 0); \
+  __ret_738; \
+})
+#endif
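
vsudotq_laneq_s32/vsudot_laneq_s32 are new in this drop; as the macro bodies show, they are built on vusdotq_s32/vusdot_s32 by swapping the signed and unsigned operands and splatting the selected 32-bit lane. A sketch, assuming a compiler and target with the i8mm extension (e.g. -march=armv8.6-a+i8mm; the function name is illustrative):

    #include <arm_neon.h>

    /* Per 32-bit lane: acc += dot product of four signed bytes of a
     * with the four unsigned bytes in lane 3 of b. */
    int32x4_t sudot_lane3(int32x4_t acc, int8x16_t a, uint8x16_t b) {
      return vsudotq_laneq_s32(acc, a, b, 3);
    }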
+
+#ifdef __LITTLE_ENDIAN__
 __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
   poly8x8_t __ret;
   __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
@@ -58721,6 +62573,58 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
+#define vusdotq_laneq_s32(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \
+  int32x4_t __s0_739 = __p0_739; \
+  uint8x16_t __s1_739 = __p1_739; \
+  int8x16_t __s2_739 = __p2_739; \
+  int32x4_t __ret_739; \
+int8x16_t __reint_739 = __s2_739; \
+  __ret_739 = vusdotq_s32(__s0_739, __s1_739, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_739, __p3_739))); \
+  __ret_739; \
+})
+#else
+#define vusdotq_laneq_s32(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \
+  int32x4_t __s0_740 = __p0_740; \
+  uint8x16_t __s1_740 = __p1_740; \
+  int8x16_t __s2_740 = __p2_740; \
+  int32x4_t __rev0_740;  __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 3, 2, 1, 0); \
+  uint8x16_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_740;  __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_740; \
+int8x16_t __reint_740 = __rev2_740; \
+  __ret_740 = __noswap_vusdotq_s32(__rev0_740, __rev1_740, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_740, __p3_740))); \
+  __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 3, 2, 1, 0); \
+  __ret_740; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vusdot_laneq_s32(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \
+  int32x2_t __s0_741 = __p0_741; \
+  uint8x8_t __s1_741 = __p1_741; \
+  int8x16_t __s2_741 = __p2_741; \
+  int32x2_t __ret_741; \
+int8x16_t __reint_741 = __s2_741; \
+  __ret_741 = vusdot_s32(__s0_741, __s1_741, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_741, __p3_741))); \
+  __ret_741; \
+})
+#else
+#define vusdot_laneq_s32(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \
+  int32x2_t __s0_742 = __p0_742; \
+  uint8x8_t __s1_742 = __p1_742; \
+  int8x16_t __s2_742 = __p2_742; \
+  int32x2_t __rev0_742;  __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 1, 0); \
+  uint8x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int8x16_t __rev2_742;  __rev2_742 = __builtin_shufflevector(__s2_742, __s2_742, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_742; \
+int8x16_t __reint_742 = __rev2_742; \
+  __ret_742 = __noswap_vusdot_s32(__rev0_742, __rev1_742, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_742, __p3_742))); \
+  __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 1, 0); \
+  __ret_742; \
+})
+#endif
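
vusdotq_laneq_s32/vusdot_laneq_s32 mirror the vsudot* forms above with the operand signedness reversed: unsigned bytes from the first vector, a splatted lane of signed bytes from the second. A sketch under the same i8mm assumption:

    #include <arm_neon.h>

    /* Per 32-bit lane: acc += dot product of four unsigned bytes of a
     * with the four signed bytes in lane 0 of b. */
    int32x4_t usdot_lane0(int32x4_t acc, uint8x16_t a, int8x16_t b) {
      return vusdotq_laneq_s32(acc, a, b, 0);
    }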
+
+#ifdef __LITTLE_ENDIAN__
 __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
   poly8x8_t __ret;
   __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
@@ -60770,60 +64674,60 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_248, __p1_248) __extension__ ({ \
-  float16x4_t __s0_248 = __p0_248; \
-  float16_t __ret_248; \
-float16x4_t __reint_248 = __s0_248; \
-int16_t __reint1_248 = vget_lane_s16(*(int16x4_t *) &__reint_248, __p1_248); \
-  __ret_248 = *(float16_t *) &__reint1_248; \
-  __ret_248; \
+#define vget_lane_f16(__p0_743, __p1_743) __extension__ ({ \
+  float16x4_t __s0_743 = __p0_743; \
+  float16_t __ret_743; \
+float16x4_t __reint_743 = __s0_743; \
+int16_t __reint1_743 = vget_lane_s16(*(int16x4_t *) &__reint_743, __p1_743); \
+  __ret_743 = *(float16_t *) &__reint1_743; \
+  __ret_743; \
 })
 #else
-#define vget_lane_f16(__p0_249, __p1_249) __extension__ ({ \
-  float16x4_t __s0_249 = __p0_249; \
-  float16x4_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 3, 2, 1, 0); \
-  float16_t __ret_249; \
-float16x4_t __reint_249 = __rev0_249; \
-int16_t __reint1_249 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_249, __p1_249); \
-  __ret_249 = *(float16_t *) &__reint1_249; \
-  __ret_249; \
+#define vget_lane_f16(__p0_744, __p1_744) __extension__ ({ \
+  float16x4_t __s0_744 = __p0_744; \
+  float16x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
+  float16_t __ret_744; \
+float16x4_t __reint_744 = __rev0_744; \
+int16_t __reint1_744 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_744, __p1_744); \
+  __ret_744 = *(float16_t *) &__reint1_744; \
+  __ret_744; \
 })
-#define __noswap_vget_lane_f16(__p0_250, __p1_250) __extension__ ({ \
-  float16x4_t __s0_250 = __p0_250; \
-  float16_t __ret_250; \
-float16x4_t __reint_250 = __s0_250; \
-int16_t __reint1_250 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_250, __p1_250); \
-  __ret_250 = *(float16_t *) &__reint1_250; \
-  __ret_250; \
+#define __noswap_vget_lane_f16(__p0_745, __p1_745) __extension__ ({ \
+  float16x4_t __s0_745 = __p0_745; \
+  float16_t __ret_745; \
+float16x4_t __reint_745 = __s0_745; \
+int16_t __reint1_745 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_745, __p1_745); \
+  __ret_745 = *(float16_t *) &__reint1_745; \
+  __ret_745; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_251, __p1_251) __extension__ ({ \
-  float16x8_t __s0_251 = __p0_251; \
-  float16_t __ret_251; \
-float16x8_t __reint_251 = __s0_251; \
-int16_t __reint1_251 = vgetq_lane_s16(*(int16x8_t *) &__reint_251, __p1_251); \
-  __ret_251 = *(float16_t *) &__reint1_251; \
-  __ret_251; \
+#define vgetq_lane_f16(__p0_746, __p1_746) __extension__ ({ \
+  float16x8_t __s0_746 = __p0_746; \
+  float16_t __ret_746; \
+float16x8_t __reint_746 = __s0_746; \
+int16_t __reint1_746 = vgetq_lane_s16(*(int16x8_t *) &__reint_746, __p1_746); \
+  __ret_746 = *(float16_t *) &__reint1_746; \
+  __ret_746; \
 })
 #else
-#define vgetq_lane_f16(__p0_252, __p1_252) __extension__ ({ \
-  float16x8_t __s0_252 = __p0_252; \
-  float16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_252; \
-float16x8_t __reint_252 = __rev0_252; \
-int16_t __reint1_252 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_252, __p1_252); \
-  __ret_252 = *(float16_t *) &__reint1_252; \
-  __ret_252; \
+#define vgetq_lane_f16(__p0_747, __p1_747) __extension__ ({ \
+  float16x8_t __s0_747 = __p0_747; \
+  float16x8_t __rev0_747;  __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_747; \
+float16x8_t __reint_747 = __rev0_747; \
+int16_t __reint1_747 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_747, __p1_747); \
+  __ret_747 = *(float16_t *) &__reint1_747; \
+  __ret_747; \
 })
-#define __noswap_vgetq_lane_f16(__p0_253, __p1_253) __extension__ ({ \
-  float16x8_t __s0_253 = __p0_253; \
-  float16_t __ret_253; \
-float16x8_t __reint_253 = __s0_253; \
-int16_t __reint1_253 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_253, __p1_253); \
-  __ret_253 = *(float16_t *) &__reint1_253; \
-  __ret_253; \
+#define __noswap_vgetq_lane_f16(__p0_748, __p1_748) __extension__ ({ \
+  float16x8_t __s0_748 = __p0_748; \
+  float16_t __ret_748; \
+float16x8_t __reint_748 = __s0_748; \
+int16_t __reint1_748 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_748, __p1_748); \
+  __ret_748 = *(float16_t *) &__reint1_748; \
+  __ret_748; \
 })
 #endif
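
The vget_lane_f16/vgetq_lane_f16 macros route the half-precision bits through the s16 lane intrinsics via pointer reinterpretation, since there is no dedicated f16 lane-extract builtin here. Usage is the obvious one (illustrative name):

    #include <arm_neon.h>

    /* Extract lane 2 of a float16x4_t as a scalar float16_t. */
    float16_t third_lane(float16x4_t v) {
      return vget_lane_f16(v, 2);
    }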
 
@@ -60966,98 +64870,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_u32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \
+  uint64x2_t __s0_749 = __p0_749; \
+  uint32x2_t __s1_749 = __p1_749; \
+  uint32x2_t __s2_749 = __p2_749; \
+  uint64x2_t __ret_749; \
+  __ret_749 = __s0_749 + vmull_u32(__s1_749, splat_lane_u32(__s2_749, __p3_749)); \
+  __ret_749; \
 })
 #else
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_lane_u32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \
+  uint64x2_t __s0_750 = __p0_750; \
+  uint32x2_t __s1_750 = __p1_750; \
+  uint32x2_t __s2_750 = __p2_750; \
+  uint64x2_t __rev0_750;  __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 1, 0); \
+  uint32x2_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 1, 0); \
+  uint32x2_t __rev2_750;  __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 1, 0); \
+  uint64x2_t __ret_750; \
+  __ret_750 = __rev0_750 + __noswap_vmull_u32(__rev1_750, __noswap_splat_lane_u32(__rev2_750, __p3_750)); \
+  __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 1, 0); \
+  __ret_750; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_u16(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \
+  uint32x4_t __s0_751 = __p0_751; \
+  uint16x4_t __s1_751 = __p1_751; \
+  uint16x4_t __s2_751 = __p2_751; \
+  uint32x4_t __ret_751; \
+  __ret_751 = __s0_751 + vmull_u16(__s1_751, splat_lane_u16(__s2_751, __p3_751)); \
+  __ret_751; \
 })
 #else
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_lane_u16(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \
+  uint32x4_t __s0_752 = __p0_752; \
+  uint16x4_t __s1_752 = __p1_752; \
+  uint16x4_t __s2_752 = __p2_752; \
+  uint32x4_t __rev0_752;  __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 3, 2, 1, 0); \
+  uint16x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
+  uint16x4_t __rev2_752;  __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 3, 2, 1, 0); \
+  uint32x4_t __ret_752; \
+  __ret_752 = __rev0_752 + __noswap_vmull_u16(__rev1_752, __noswap_splat_lane_u16(__rev2_752, __p3_752)); \
+  __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 3, 2, 1, 0); \
+  __ret_752; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_s32(__p0_753, __p1_753, __p2_753, __p3_753) __extension__ ({ \
+  int64x2_t __s0_753 = __p0_753; \
+  int32x2_t __s1_753 = __p1_753; \
+  int32x2_t __s2_753 = __p2_753; \
+  int64x2_t __ret_753; \
+  __ret_753 = __s0_753 + vmull_s32(__s1_753, splat_lane_s32(__s2_753, __p3_753)); \
+  __ret_753; \
 })
 #else
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlal_lane_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \
+  int64x2_t __s0_754 = __p0_754; \
+  int32x2_t __s1_754 = __p1_754; \
+  int32x2_t __s2_754 = __p2_754; \
+  int64x2_t __rev0_754;  __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \
+  int32x2_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 1, 0); \
+  int32x2_t __rev2_754;  __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 1, 0); \
+  int64x2_t __ret_754; \
+  __ret_754 = __rev0_754 + __noswap_vmull_s32(__rev1_754, __noswap_splat_lane_s32(__rev2_754, __p3_754)); \
+  __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \
+  __ret_754; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlal_lane_s16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \
+  int32x4_t __s0_755 = __p0_755; \
+  int16x4_t __s1_755 = __p1_755; \
+  int16x4_t __s2_755 = __p2_755; \
+  int32x4_t __ret_755; \
+  __ret_755 = __s0_755 + vmull_s16(__s1_755, splat_lane_s16(__s2_755, __p3_755)); \
+  __ret_755; \
 })
 #else
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlal_lane_s16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \
+  int32x4_t __s0_756 = __p0_756; \
+  int16x4_t __s1_756 = __p1_756; \
+  int16x4_t __s2_756 = __p2_756; \
+  int32x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
+  int16x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
+  int16x4_t __rev2_756;  __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \
+  int32x4_t __ret_756; \
+  __ret_756 = __rev0_756 + __noswap_vmull_s16(__rev1_756, __noswap_splat_lane_s16(__rev2_756, __p3_756)); \
+  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 3, 2, 1, 0); \
+  __ret_756; \
 })
 #endif
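
The vmlal_lane_* hunks (and the vmlsl_lane_* hunks that follow, their subtracting analogue) replace the open-coded __builtin_shufflevector broadcasts with the splat_lane_*/__noswap_splat_lane_* helpers; the computed result, a widening multiply-accumulate by one lane, is the same. A sketch (illustrative name):

    #include <arm_neon.h>

    /* acc[i] += (int32_t)a[i] * (int32_t)b[1], widening 16 -> 32 bits. */
    int32x4_t mlal_by_lane1(int32x4_t acc, int16x4_t a, int16x4_t b) {
      return vmlal_lane_s16(acc, a, b, 1);
    }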
 
@@ -61288,98 +65192,98 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_u32(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \
+  uint64x2_t __s0_757 = __p0_757; \
+  uint32x2_t __s1_757 = __p1_757; \
+  uint32x2_t __s2_757 = __p2_757; \
+  uint64x2_t __ret_757; \
+  __ret_757 = __s0_757 - vmull_u32(__s1_757, splat_lane_u32(__s2_757, __p3_757)); \
+  __ret_757; \
 })
 #else
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_lane_u32(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \
+  uint64x2_t __s0_758 = __p0_758; \
+  uint32x2_t __s1_758 = __p1_758; \
+  uint32x2_t __s2_758 = __p2_758; \
+  uint64x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
+  uint32x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
+  uint32x2_t __rev2_758;  __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 1, 0); \
+  uint64x2_t __ret_758; \
+  __ret_758 = __rev0_758 - __noswap_vmull_u32(__rev1_758, __noswap_splat_lane_u32(__rev2_758, __p3_758)); \
+  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 1, 0); \
+  __ret_758; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_u16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \
+  uint32x4_t __s0_759 = __p0_759; \
+  uint16x4_t __s1_759 = __p1_759; \
+  uint16x4_t __s2_759 = __p2_759; \
+  uint32x4_t __ret_759; \
+  __ret_759 = __s0_759 - vmull_u16(__s1_759, splat_lane_u16(__s2_759, __p3_759)); \
+  __ret_759; \
 })
 #else
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_lane_u16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \
+  uint32x4_t __s0_760 = __p0_760; \
+  uint16x4_t __s1_760 = __p1_760; \
+  uint16x4_t __s2_760 = __p2_760; \
+  uint32x4_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \
+  uint16x4_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \
+  uint16x4_t __rev2_760;  __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \
+  uint32x4_t __ret_760; \
+  __ret_760 = __rev0_760 - __noswap_vmull_u16(__rev1_760, __noswap_splat_lane_u16(__rev2_760, __p3_760)); \
+  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \
+  __ret_760; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_s32(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \
+  int64x2_t __s0_761 = __p0_761; \
+  int32x2_t __s1_761 = __p1_761; \
+  int32x2_t __s2_761 = __p2_761; \
+  int64x2_t __ret_761; \
+  __ret_761 = __s0_761 - vmull_s32(__s1_761, splat_lane_s32(__s2_761, __p3_761)); \
+  __ret_761; \
 })
 #else
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
+#define vmlsl_lane_s32(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \
+  int64x2_t __s0_762 = __p0_762; \
+  int32x2_t __s1_762 = __p1_762; \
+  int32x2_t __s2_762 = __p2_762; \
+  int64x2_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 1, 0); \
+  int32x2_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 1, 0); \
+  int32x2_t __rev2_762;  __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 1, 0); \
+  int64x2_t __ret_762; \
+  __ret_762 = __rev0_762 - __noswap_vmull_s32(__rev1_762, __noswap_splat_lane_s32(__rev2_762, __p3_762)); \
+  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 1, 0); \
+  __ret_762; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
+#define vmlsl_lane_s16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \
+  int32x4_t __s0_763 = __p0_763; \
+  int16x4_t __s1_763 = __p1_763; \
+  int16x4_t __s2_763 = __p2_763; \
+  int32x4_t __ret_763; \
+  __ret_763 = __s0_763 - vmull_s16(__s1_763, splat_lane_s16(__s2_763, __p3_763)); \
+  __ret_763; \
 })
 #else
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
+#define vmlsl_lane_s16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \
+  int32x4_t __s0_764 = __p0_764; \
+  int16x4_t __s1_764 = __p1_764; \
+  int16x4_t __s2_764 = __p2_764; \
+  int32x4_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 3, 2, 1, 0); \
+  int16x4_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 3, 2, 1, 0); \
+  int16x4_t __rev2_764;  __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 3, 2, 1, 0); \
+  int32x4_t __ret_764; \
+  __ret_764 = __rev0_764 - __noswap_vmull_s16(__rev1_764, __noswap_splat_lane_s16(__rev2_764, __p3_764)); \
+  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
+  __ret_764; \
 })
 #endif
 
@@ -61472,479 +65376,663 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_254, __p1_254, __p2_254) __extension__ ({ \
-  float16_t __s0_254 = __p0_254; \
-  float16x4_t __s1_254 = __p1_254; \
-  float16x4_t __ret_254; \
-float16_t __reint_254 = __s0_254; \
-float16x4_t __reint1_254 = __s1_254; \
-int16x4_t __reint2_254 = vset_lane_s16(*(int16_t *) &__reint_254, *(int16x4_t *) &__reint1_254, __p2_254); \
-  __ret_254 = *(float16x4_t *) &__reint2_254; \
-  __ret_254; \
+#define vset_lane_f16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
+  float16_t __s0_765 = __p0_765; \
+  float16x4_t __s1_765 = __p1_765; \
+  float16x4_t __ret_765; \
+float16_t __reint_765 = __s0_765; \
+float16x4_t __reint1_765 = __s1_765; \
+int16x4_t __reint2_765 = vset_lane_s16(*(int16_t *) &__reint_765, *(int16x4_t *) &__reint1_765, __p2_765); \
+  __ret_765 = *(float16x4_t *) &__reint2_765; \
+  __ret_765; \
 })
 #else
-#define vset_lane_f16(__p0_255, __p1_255, __p2_255) __extension__ ({ \
-  float16_t __s0_255 = __p0_255; \
-  float16x4_t __s1_255 = __p1_255; \
-  float16x4_t __rev1_255;  __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 3, 2, 1, 0); \
-  float16x4_t __ret_255; \
-float16_t __reint_255 = __s0_255; \
-float16x4_t __reint1_255 = __rev1_255; \
-int16x4_t __reint2_255 = __noswap_vset_lane_s16(*(int16_t *) &__reint_255, *(int16x4_t *) &__reint1_255, __p2_255); \
-  __ret_255 = *(float16x4_t *) &__reint2_255; \
-  __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 3, 2, 1, 0); \
-  __ret_255; \
+#define vset_lane_f16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
+  float16_t __s0_766 = __p0_766; \
+  float16x4_t __s1_766 = __p1_766; \
+  float16x4_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \
+  float16x4_t __ret_766; \
+float16_t __reint_766 = __s0_766; \
+float16x4_t __reint1_766 = __rev1_766; \
+int16x4_t __reint2_766 = __noswap_vset_lane_s16(*(int16_t *) &__reint_766, *(int16x4_t *) &__reint1_766, __p2_766); \
+  __ret_766 = *(float16x4_t *) &__reint2_766; \
+  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \
+  __ret_766; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_256, __p1_256, __p2_256) __extension__ ({ \
-  float16_t __s0_256 = __p0_256; \
-  float16x8_t __s1_256 = __p1_256; \
-  float16x8_t __ret_256; \
-float16_t __reint_256 = __s0_256; \
-float16x8_t __reint1_256 = __s1_256; \
-int16x8_t __reint2_256 = vsetq_lane_s16(*(int16_t *) &__reint_256, *(int16x8_t *) &__reint1_256, __p2_256); \
-  __ret_256 = *(float16x8_t *) &__reint2_256; \
-  __ret_256; \
+#define vsetq_lane_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \
+  float16_t __s0_767 = __p0_767; \
+  float16x8_t __s1_767 = __p1_767; \
+  float16x8_t __ret_767; \
+float16_t __reint_767 = __s0_767; \
+float16x8_t __reint1_767 = __s1_767; \
+int16x8_t __reint2_767 = vsetq_lane_s16(*(int16_t *) &__reint_767, *(int16x8_t *) &__reint1_767, __p2_767); \
+  __ret_767 = *(float16x8_t *) &__reint2_767; \
+  __ret_767; \
 })
 #else
-#define vsetq_lane_f16(__p0_257, __p1_257, __p2_257) __extension__ ({ \
-  float16_t __s0_257 = __p0_257; \
-  float16x8_t __s1_257 = __p1_257; \
-  float16x8_t __rev1_257;  __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_257; \
-float16_t __reint_257 = __s0_257; \
-float16x8_t __reint1_257 = __rev1_257; \
-int16x8_t __reint2_257 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_257, *(int16x8_t *) &__reint1_257, __p2_257); \
-  __ret_257 = *(float16x8_t *) &__reint2_257; \
-  __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_257; \
+#define vsetq_lane_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \
+  float16_t __s0_768 = __p0_768; \
+  float16x8_t __s1_768 = __p1_768; \
+  float16x8_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __ret_768; \
+float16_t __reint_768 = __s0_768; \
+float16x8_t __reint1_768 = __rev1_768; \
+int16x8_t __reint2_768 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_768, *(int16x8_t *) &__reint1_768, __p2_768); \
+  __ret_768 = *(float16x8_t *) &__reint2_768; \
+  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
+  __ret_768; \
 })
 #endif
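
vset_lane_f16/vsetq_lane_f16 use the same reinterpret-through-s16 trick as the vget_lane_f16 macros earlier, in the insertion direction (illustrative name):

    #include <arm_neon.h>

    /* Overwrite lane 0 of v with the scalar x. */
    float16x4_t put_first(float16_t x, float16x4_t v) {
      return vset_lane_f16(x, v, 0);
    }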
 
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlalbq_lane_f32(__p0_769, __p1_769, __p2_769, __p3_769) __extension__ ({ \
+  float32x4_t __s0_769 = __p0_769; \
+  bfloat16x8_t __s1_769 = __p1_769; \
+  bfloat16x4_t __s2_769 = __p2_769; \
+  float32x4_t __ret_769; \
+  __ret_769 = vbfmlalbq_f32(__s0_769, __s1_769, (bfloat16x8_t) {vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769)}); \
+  __ret_769; \
+})
+#else
+#define vbfmlalbq_lane_f32(__p0_770, __p1_770, __p2_770, __p3_770) __extension__ ({ \
+  float32x4_t __s0_770 = __p0_770; \
+  bfloat16x8_t __s1_770 = __p1_770; \
+  bfloat16x4_t __s2_770 = __p2_770; \
+  float32x4_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_770;  __rev2_770 = __builtin_shufflevector(__s2_770, __s2_770, 3, 2, 1, 0); \
+  float32x4_t __ret_770; \
+  __ret_770 = __noswap_vbfmlalbq_f32(__rev0_770, __rev1_770, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770)}); \
+  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
+  __ret_770; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlalbq_laneq_f32(__p0_771, __p1_771, __p2_771, __p3_771) __extension__ ({ \
+  float32x4_t __s0_771 = __p0_771; \
+  bfloat16x8_t __s1_771 = __p1_771; \
+  bfloat16x8_t __s2_771 = __p2_771; \
+  float32x4_t __ret_771; \
+  __ret_771 = vbfmlalbq_f32(__s0_771, __s1_771, (bfloat16x8_t) {vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771)}); \
+  __ret_771; \
+})
+#else
+#define vbfmlalbq_laneq_f32(__p0_772, __p1_772, __p2_772, __p3_772) __extension__ ({ \
+  float32x4_t __s0_772 = __p0_772; \
+  bfloat16x8_t __s1_772 = __p1_772; \
+  bfloat16x8_t __s2_772 = __p2_772; \
+  float32x4_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_772;  __rev2_772 = __builtin_shufflevector(__s2_772, __s2_772, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_772; \
+  __ret_772 = __noswap_vbfmlalbq_f32(__rev0_772, __rev1_772, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772)}); \
+  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 3, 2, 1, 0); \
+  __ret_772; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlaltq_lane_f32(__p0_773, __p1_773, __p2_773, __p3_773) __extension__ ({ \
+  float32x4_t __s0_773 = __p0_773; \
+  bfloat16x8_t __s1_773 = __p1_773; \
+  bfloat16x4_t __s2_773 = __p2_773; \
+  float32x4_t __ret_773; \
+  __ret_773 = vbfmlaltq_f32(__s0_773, __s1_773, (bfloat16x8_t) {vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773)}); \
+  __ret_773; \
+})
+#else
+#define vbfmlaltq_lane_f32(__p0_774, __p1_774, __p2_774, __p3_774) __extension__ ({ \
+  float32x4_t __s0_774 = __p0_774; \
+  bfloat16x8_t __s1_774 = __p1_774; \
+  bfloat16x4_t __s2_774 = __p2_774; \
+  float32x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x4_t __rev2_774;  __rev2_774 = __builtin_shufflevector(__s2_774, __s2_774, 3, 2, 1, 0); \
+  float32x4_t __ret_774; \
+  __ret_774 = __noswap_vbfmlaltq_f32(__rev0_774, __rev1_774, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774)}); \
+  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \
+  __ret_774; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vbfmlaltq_laneq_f32(__p0_775, __p1_775, __p2_775, __p3_775) __extension__ ({ \
+  float32x4_t __s0_775 = __p0_775; \
+  bfloat16x8_t __s1_775 = __p1_775; \
+  bfloat16x8_t __s2_775 = __p2_775; \
+  float32x4_t __ret_775; \
+  __ret_775 = vbfmlaltq_f32(__s0_775, __s1_775, (bfloat16x8_t) {vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775)}); \
+  __ret_775; \
+})
+#else
+#define vbfmlaltq_laneq_f32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \
+  float32x4_t __s0_776 = __p0_776; \
+  bfloat16x8_t __s1_776 = __p1_776; \
+  bfloat16x8_t __s2_776 = __p2_776; \
+  float32x4_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 3, 2, 1, 0); \
+  bfloat16x8_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  bfloat16x8_t __rev2_776;  __rev2_776 = __builtin_shufflevector(__s2_776, __s2_776, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_776; \
+  __ret_776 = __noswap_vbfmlaltq_f32(__rev0_776, __rev1_776, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776)}); \
+  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
+  __ret_776; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_bf16(vget_high_bf16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
+  float32x4_t __ret;
+  __ret = vcvt_f32_bf16(vget_low_bf16(__p0));
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
+  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
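+/* Editor's note (illustrative): vcvtq_low_f32_bf16 and vcvtq_high_f32_bf16
+ * widen one half of a bfloat16x8_t to float32x4_t, as the vget_low_bf16 /
+ * vget_high_bf16 calls above show.  E.g., assuming v is a bfloat16x8_t:
+ *
+ *   float32x4_t lo = vcvtq_low_f32_bf16(v);   // from lanes 0..3
+ *   float32x4_t hi = vcvtq_high_f32_bf16(v);  // from lanes 4..7
+ */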
+
+#endif
 #if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float32x4_t __s0_258 = __p0_258; \
-  float16x8_t __s1_258 = __p1_258; \
-  float16x4_t __s2_258 = __p2_258; \
-  float32x4_t __ret_258; \
-  __ret_258 = vfmlalq_high_f16(__s0_258, __s1_258, (float16x8_t) {vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258)}); \
-  __ret_258; \
+#define vfmlalq_lane_high_f16(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \
+  float32x4_t __s0_777 = __p0_777; \
+  float16x8_t __s1_777 = __p1_777; \
+  float16x4_t __s2_777 = __p2_777; \
+  float32x4_t __ret_777; \
+  __ret_777 = vfmlalq_high_f16(__s0_777, __s1_777, (float16x8_t) {vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777)}); \
+  __ret_777; \
 })
 #else
-#define vfmlalq_lane_high_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float32x4_t __s0_259 = __p0_259; \
-  float16x8_t __s1_259 = __p1_259; \
-  float16x4_t __s2_259 = __p2_259; \
-  float32x4_t __rev0_259;  __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 3, 2, 1, 0); \
-  float16x8_t __rev1_259;  __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_259;  __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 3, 2, 1, 0); \
-  float32x4_t __ret_259; \
-  __ret_259 = __noswap_vfmlalq_high_f16(__rev0_259, __rev1_259, (float16x8_t) {__noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259)}); \
-  __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 3, 2, 1, 0); \
-  __ret_259; \
+#define vfmlalq_lane_high_f16(__p0_778, __p1_778, __p2_778, __p3_778) __extension__ ({ \
+  float32x4_t __s0_778 = __p0_778; \
+  float16x8_t __s1_778 = __p1_778; \
+  float16x4_t __s2_778 = __p2_778; \
+  float32x4_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \
+  float16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_778;  __rev2_778 = __builtin_shufflevector(__s2_778, __s2_778, 3, 2, 1, 0); \
+  float32x4_t __ret_778; \
+  __ret_778 = __noswap_vfmlalq_high_f16(__rev0_778, __rev1_778, (float16x8_t) {__noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778)}); \
+  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \
+  __ret_778; \
 })
 #endif
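+/* Editor's note (illustrative): most of the remaining hunks in this file
+ * only renumber the generator's macro-local temporaries (__s0_258 becomes
+ * __s0_777, and so on) because the new bf16 intrinsics above shifted the
+ * unique-suffix counter; the macro bodies themselves are unchanged.  Newly
+ * added intrinsics (e.g. the vsudot family further down) appear as whole
+ * '+' blocks instead.  A hypothetical use of vfmlalq_lane_high_f16, with
+ * acc a float32x4_t, a a float16x8_t and b a float16x4_t:
+ *
+ *   acc = vfmlalq_lane_high_f16(acc, a, b, 1);  // widening FMA with lane 1 of b
+ */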
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float32x2_t __s0_260 = __p0_260; \
-  float16x4_t __s1_260 = __p1_260; \
-  float16x4_t __s2_260 = __p2_260; \
-  float32x2_t __ret_260; \
-  __ret_260 = vfmlal_high_f16(__s0_260, __s1_260, (float16x4_t) {vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260)}); \
-  __ret_260; \
+#define vfmlal_lane_high_f16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \
+  float32x2_t __s0_779 = __p0_779; \
+  float16x4_t __s1_779 = __p1_779; \
+  float16x4_t __s2_779 = __p2_779; \
+  float32x2_t __ret_779; \
+  __ret_779 = vfmlal_high_f16(__s0_779, __s1_779, (float16x4_t) {vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779)}); \
+  __ret_779; \
 })
 #else
-#define vfmlal_lane_high_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  float32x2_t __s0_261 = __p0_261; \
-  float16x4_t __s1_261 = __p1_261; \
-  float16x4_t __s2_261 = __p2_261; \
-  float32x2_t __rev0_261;  __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 1, 0); \
-  float16x4_t __rev1_261;  __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 3, 2, 1, 0); \
-  float16x4_t __rev2_261;  __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 3, 2, 1, 0); \
-  float32x2_t __ret_261; \
-  __ret_261 = __noswap_vfmlal_high_f16(__rev0_261, __rev1_261, (float16x4_t) {__noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261)}); \
-  __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 1, 0); \
-  __ret_261; \
+#define vfmlal_lane_high_f16(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \
+  float32x2_t __s0_780 = __p0_780; \
+  float16x4_t __s1_780 = __p1_780; \
+  float16x4_t __s2_780 = __p2_780; \
+  float32x2_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \
+  float16x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
+  float16x4_t __rev2_780;  __rev2_780 = __builtin_shufflevector(__s2_780, __s2_780, 3, 2, 1, 0); \
+  float32x2_t __ret_780; \
+  __ret_780 = __noswap_vfmlal_high_f16(__rev0_780, __rev1_780, (float16x4_t) {__noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780)}); \
+  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 1, 0); \
+  __ret_780; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  float32x4_t __s0_262 = __p0_262; \
-  float16x8_t __s1_262 = __p1_262; \
-  float16x4_t __s2_262 = __p2_262; \
-  float32x4_t __ret_262; \
-  __ret_262 = vfmlalq_low_f16(__s0_262, __s1_262, (float16x8_t) {vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262)}); \
-  __ret_262; \
+#define vfmlalq_lane_low_f16(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \
+  float32x4_t __s0_781 = __p0_781; \
+  float16x8_t __s1_781 = __p1_781; \
+  float16x4_t __s2_781 = __p2_781; \
+  float32x4_t __ret_781; \
+  __ret_781 = vfmlalq_low_f16(__s0_781, __s1_781, (float16x8_t) {vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781)}); \
+  __ret_781; \
 })
 #else
-#define vfmlalq_lane_low_f16(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  float32x4_t __s0_263 = __p0_263; \
-  float16x8_t __s1_263 = __p1_263; \
-  float16x4_t __s2_263 = __p2_263; \
-  float32x4_t __rev0_263;  __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 3, 2, 1, 0); \
-  float16x8_t __rev1_263;  __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_263;  __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 3, 2, 1, 0); \
-  float32x4_t __ret_263; \
-  __ret_263 = __noswap_vfmlalq_low_f16(__rev0_263, __rev1_263, (float16x8_t) {__noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263)}); \
-  __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 3, 2, 1, 0); \
-  __ret_263; \
+#define vfmlalq_lane_low_f16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \
+  float32x4_t __s0_782 = __p0_782; \
+  float16x8_t __s1_782 = __p1_782; \
+  float16x4_t __s2_782 = __p2_782; \
+  float32x4_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 3, 2, 1, 0); \
+  float16x8_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_782;  __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 3, 2, 1, 0); \
+  float32x4_t __ret_782; \
+  __ret_782 = __noswap_vfmlalq_low_f16(__rev0_782, __rev1_782, (float16x8_t) {__noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782)}); \
+  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
+  __ret_782; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  float32x2_t __s0_264 = __p0_264; \
-  float16x4_t __s1_264 = __p1_264; \
-  float16x4_t __s2_264 = __p2_264; \
-  float32x2_t __ret_264; \
-  __ret_264 = vfmlal_low_f16(__s0_264, __s1_264, (float16x4_t) {vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264)}); \
-  __ret_264; \
+#define vfmlal_lane_low_f16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \
+  float32x2_t __s0_783 = __p0_783; \
+  float16x4_t __s1_783 = __p1_783; \
+  float16x4_t __s2_783 = __p2_783; \
+  float32x2_t __ret_783; \
+  __ret_783 = vfmlal_low_f16(__s0_783, __s1_783, (float16x4_t) {vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783)}); \
+  __ret_783; \
 })
 #else
-#define vfmlal_lane_low_f16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  float32x2_t __s0_265 = __p0_265; \
-  float16x4_t __s1_265 = __p1_265; \
-  float16x4_t __s2_265 = __p2_265; \
-  float32x2_t __rev0_265;  __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 1, 0); \
-  float16x4_t __rev1_265;  __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \
-  float16x4_t __rev2_265;  __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 3, 2, 1, 0); \
-  float32x2_t __ret_265; \
-  __ret_265 = __noswap_vfmlal_low_f16(__rev0_265, __rev1_265, (float16x4_t) {__noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265)}); \
-  __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 1, 0); \
-  __ret_265; \
+#define vfmlal_lane_low_f16(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \
+  float32x2_t __s0_784 = __p0_784; \
+  float16x4_t __s1_784 = __p1_784; \
+  float16x4_t __s2_784 = __p2_784; \
+  float32x2_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 1, 0); \
+  float16x4_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \
+  float16x4_t __rev2_784;  __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 3, 2, 1, 0); \
+  float32x2_t __ret_784; \
+  __ret_784 = __noswap_vfmlal_low_f16(__rev0_784, __rev1_784, (float16x4_t) {__noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784)}); \
+  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 1, 0); \
+  __ret_784; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  float32x4_t __s0_266 = __p0_266; \
-  float16x8_t __s1_266 = __p1_266; \
-  float16x8_t __s2_266 = __p2_266; \
-  float32x4_t __ret_266; \
-  __ret_266 = vfmlalq_high_f16(__s0_266, __s1_266, (float16x8_t) {vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266)}); \
-  __ret_266; \
+#define vfmlalq_laneq_high_f16(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \
+  float32x4_t __s0_785 = __p0_785; \
+  float16x8_t __s1_785 = __p1_785; \
+  float16x8_t __s2_785 = __p2_785; \
+  float32x4_t __ret_785; \
+  __ret_785 = vfmlalq_high_f16(__s0_785, __s1_785, (float16x8_t) {vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785)}); \
+  __ret_785; \
 })
 #else
-#define vfmlalq_laneq_high_f16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  float32x4_t __s0_267 = __p0_267; \
-  float16x8_t __s1_267 = __p1_267; \
-  float16x8_t __s2_267 = __p2_267; \
-  float32x4_t __rev0_267;  __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 3, 2, 1, 0); \
-  float16x8_t __rev1_267;  __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_267;  __rev2_267 = __builtin_shufflevector(__s2_267, __s2_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_267; \
-  __ret_267 = __noswap_vfmlalq_high_f16(__rev0_267, __rev1_267, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267)}); \
-  __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 3, 2, 1, 0); \
-  __ret_267; \
+#define vfmlalq_laneq_high_f16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \
+  float32x4_t __s0_786 = __p0_786; \
+  float16x8_t __s1_786 = __p1_786; \
+  float16x8_t __s2_786 = __p2_786; \
+  float32x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
+  float16x8_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_786;  __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_786; \
+  __ret_786 = __noswap_vfmlalq_high_f16(__rev0_786, __rev1_786, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786)}); \
+  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \
+  __ret_786; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  float32x2_t __s0_268 = __p0_268; \
-  float16x4_t __s1_268 = __p1_268; \
-  float16x8_t __s2_268 = __p2_268; \
-  float32x2_t __ret_268; \
-  __ret_268 = vfmlal_high_f16(__s0_268, __s1_268, (float16x4_t) {vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268)}); \
-  __ret_268; \
+#define vfmlal_laneq_high_f16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \
+  float32x2_t __s0_787 = __p0_787; \
+  float16x4_t __s1_787 = __p1_787; \
+  float16x8_t __s2_787 = __p2_787; \
+  float32x2_t __ret_787; \
+  __ret_787 = vfmlal_high_f16(__s0_787, __s1_787, (float16x4_t) {vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787)}); \
+  __ret_787; \
 })
 #else
-#define vfmlal_laneq_high_f16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  float32x2_t __s0_269 = __p0_269; \
-  float16x4_t __s1_269 = __p1_269; \
-  float16x8_t __s2_269 = __p2_269; \
-  float32x2_t __rev0_269;  __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 1, 0); \
-  float16x4_t __rev1_269;  __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 3, 2, 1, 0); \
-  float16x8_t __rev2_269;  __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_269; \
-  __ret_269 = __noswap_vfmlal_high_f16(__rev0_269, __rev1_269, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269)}); \
-  __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 1, 0); \
-  __ret_269; \
+#define vfmlal_laneq_high_f16(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \
+  float32x2_t __s0_788 = __p0_788; \
+  float16x4_t __s1_788 = __p1_788; \
+  float16x8_t __s2_788 = __p2_788; \
+  float32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
+  float16x4_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 3, 2, 1, 0); \
+  float16x8_t __rev2_788;  __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_788; \
+  __ret_788 = __noswap_vfmlal_high_f16(__rev0_788, __rev1_788, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788)}); \
+  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 1, 0); \
+  __ret_788; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  float32x4_t __s0_270 = __p0_270; \
-  float16x8_t __s1_270 = __p1_270; \
-  float16x8_t __s2_270 = __p2_270; \
-  float32x4_t __ret_270; \
-  __ret_270 = vfmlalq_low_f16(__s0_270, __s1_270, (float16x8_t) {vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270)}); \
-  __ret_270; \
+#define vfmlalq_laneq_low_f16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \
+  float32x4_t __s0_789 = __p0_789; \
+  float16x8_t __s1_789 = __p1_789; \
+  float16x8_t __s2_789 = __p2_789; \
+  float32x4_t __ret_789; \
+  __ret_789 = vfmlalq_low_f16(__s0_789, __s1_789, (float16x8_t) {vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789)}); \
+  __ret_789; \
 })
 #else
-#define vfmlalq_laneq_low_f16(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  float32x4_t __s0_271 = __p0_271; \
-  float16x8_t __s1_271 = __p1_271; \
-  float16x8_t __s2_271 = __p2_271; \
-  float32x4_t __rev0_271;  __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \
-  float16x8_t __rev1_271;  __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_271;  __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_271; \
-  __ret_271 = __noswap_vfmlalq_low_f16(__rev0_271, __rev1_271, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271)}); \
-  __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \
-  __ret_271; \
+#define vfmlalq_laneq_low_f16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \
+  float32x4_t __s0_790 = __p0_790; \
+  float16x8_t __s1_790 = __p1_790; \
+  float16x8_t __s2_790 = __p2_790; \
+  float32x4_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \
+  float16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_790;  __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_790; \
+  __ret_790 = __noswap_vfmlalq_low_f16(__rev0_790, __rev1_790, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790)}); \
+  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 3, 2, 1, 0); \
+  __ret_790; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  float32x2_t __s0_272 = __p0_272; \
-  float16x4_t __s1_272 = __p1_272; \
-  float16x8_t __s2_272 = __p2_272; \
-  float32x2_t __ret_272; \
-  __ret_272 = vfmlal_low_f16(__s0_272, __s1_272, (float16x4_t) {vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272)}); \
-  __ret_272; \
+#define vfmlal_laneq_low_f16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \
+  float32x2_t __s0_791 = __p0_791; \
+  float16x4_t __s1_791 = __p1_791; \
+  float16x8_t __s2_791 = __p2_791; \
+  float32x2_t __ret_791; \
+  __ret_791 = vfmlal_low_f16(__s0_791, __s1_791, (float16x4_t) {vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791)}); \
+  __ret_791; \
 })
 #else
-#define vfmlal_laneq_low_f16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  float32x2_t __s0_273 = __p0_273; \
-  float16x4_t __s1_273 = __p1_273; \
-  float16x8_t __s2_273 = __p2_273; \
-  float32x2_t __rev0_273;  __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \
-  float16x4_t __rev1_273;  __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 3, 2, 1, 0); \
-  float16x8_t __rev2_273;  __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_273; \
-  __ret_273 = __noswap_vfmlal_low_f16(__rev0_273, __rev1_273, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273)}); \
-  __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \
-  __ret_273; \
+#define vfmlal_laneq_low_f16(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \
+  float32x2_t __s0_792 = __p0_792; \
+  float16x4_t __s1_792 = __p1_792; \
+  float16x8_t __s2_792 = __p2_792; \
+  float32x2_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \
+  float16x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
+  float16x8_t __rev2_792;  __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_792; \
+  __ret_792 = __noswap_vfmlal_low_f16(__rev0_792, __rev1_792, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792)}); \
+  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 1, 0); \
+  __ret_792; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  float32x4_t __s0_274 = __p0_274; \
-  float16x8_t __s1_274 = __p1_274; \
-  float16x4_t __s2_274 = __p2_274; \
-  float32x4_t __ret_274; \
-  __ret_274 = vfmlslq_high_f16(__s0_274, __s1_274, (float16x8_t) {vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274)}); \
-  __ret_274; \
+#define vfmlslq_lane_high_f16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \
+  float32x4_t __s0_793 = __p0_793; \
+  float16x8_t __s1_793 = __p1_793; \
+  float16x4_t __s2_793 = __p2_793; \
+  float32x4_t __ret_793; \
+  __ret_793 = vfmlslq_high_f16(__s0_793, __s1_793, (float16x8_t) {vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793)}); \
+  __ret_793; \
 })
 #else
-#define vfmlslq_lane_high_f16(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  float32x4_t __s0_275 = __p0_275; \
-  float16x8_t __s1_275 = __p1_275; \
-  float16x4_t __s2_275 = __p2_275; \
-  float32x4_t __rev0_275;  __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \
-  float16x8_t __rev1_275;  __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_275;  __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 3, 2, 1, 0); \
-  float32x4_t __ret_275; \
-  __ret_275 = __noswap_vfmlslq_high_f16(__rev0_275, __rev1_275, (float16x8_t) {__noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275)}); \
-  __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \
-  __ret_275; \
+#define vfmlslq_lane_high_f16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \
+  float32x4_t __s0_794 = __p0_794; \
+  float16x8_t __s1_794 = __p1_794; \
+  float16x4_t __s2_794 = __p2_794; \
+  float32x4_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 3, 2, 1, 0); \
+  float16x8_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_794;  __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 3, 2, 1, 0); \
+  float32x4_t __ret_794; \
+  __ret_794 = __noswap_vfmlslq_high_f16(__rev0_794, __rev1_794, (float16x8_t) {__noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794)}); \
+  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
+  __ret_794; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  float32x2_t __s0_276 = __p0_276; \
-  float16x4_t __s1_276 = __p1_276; \
-  float16x4_t __s2_276 = __p2_276; \
-  float32x2_t __ret_276; \
-  __ret_276 = vfmlsl_high_f16(__s0_276, __s1_276, (float16x4_t) {vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276)}); \
-  __ret_276; \
+#define vfmlsl_lane_high_f16(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \
+  float32x2_t __s0_795 = __p0_795; \
+  float16x4_t __s1_795 = __p1_795; \
+  float16x4_t __s2_795 = __p2_795; \
+  float32x2_t __ret_795; \
+  __ret_795 = vfmlsl_high_f16(__s0_795, __s1_795, (float16x4_t) {vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795)}); \
+  __ret_795; \
 })
 #else
-#define vfmlsl_lane_high_f16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
-  float32x2_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x4_t __s2_277 = __p2_277; \
-  float32x2_t __rev0_277;  __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \
-  float16x4_t __rev1_277;  __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 3, 2, 1, 0); \
-  float16x4_t __rev2_277;  __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 3, 2, 1, 0); \
-  float32x2_t __ret_277; \
-  __ret_277 = __noswap_vfmlsl_high_f16(__rev0_277, __rev1_277, (float16x4_t) {__noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277)}); \
-  __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \
-  __ret_277; \
+#define vfmlsl_lane_high_f16(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \
+  float32x2_t __s0_796 = __p0_796; \
+  float16x4_t __s1_796 = __p1_796; \
+  float16x4_t __s2_796 = __p2_796; \
+  float32x2_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 1, 0); \
+  float16x4_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \
+  float16x4_t __rev2_796;  __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \
+  float32x2_t __ret_796; \
+  __ret_796 = __noswap_vfmlsl_high_f16(__rev0_796, __rev1_796, (float16x4_t) {__noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796)}); \
+  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 1, 0); \
+  __ret_796; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
-  float32x4_t __s0_278 = __p0_278; \
-  float16x8_t __s1_278 = __p1_278; \
-  float16x4_t __s2_278 = __p2_278; \
-  float32x4_t __ret_278; \
-  __ret_278 = vfmlslq_low_f16(__s0_278, __s1_278, (float16x8_t) {vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278)}); \
-  __ret_278; \
+#define vfmlslq_lane_low_f16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \
+  float32x4_t __s0_797 = __p0_797; \
+  float16x8_t __s1_797 = __p1_797; \
+  float16x4_t __s2_797 = __p2_797; \
+  float32x4_t __ret_797; \
+  __ret_797 = vfmlslq_low_f16(__s0_797, __s1_797, (float16x8_t) {vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797)}); \
+  __ret_797; \
 })
 #else
-#define vfmlslq_lane_low_f16(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
-  float32x4_t __s0_279 = __p0_279; \
-  float16x8_t __s1_279 = __p1_279; \
-  float16x4_t __s2_279 = __p2_279; \
-  float32x4_t __rev0_279;  __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 3, 2, 1, 0); \
-  float16x8_t __rev1_279;  __rev1_279 = __builtin_shufflevector(__s1_279, __s1_279, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_279;  __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 3, 2, 1, 0); \
-  float32x4_t __ret_279; \
-  __ret_279 = __noswap_vfmlslq_low_f16(__rev0_279, __rev1_279, (float16x8_t) {__noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279)}); \
-  __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 3, 2, 1, 0); \
-  __ret_279; \
+#define vfmlslq_lane_low_f16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \
+  float32x4_t __s0_798 = __p0_798; \
+  float16x8_t __s1_798 = __p1_798; \
+  float16x4_t __s2_798 = __p2_798; \
+  float32x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
+  float16x8_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x4_t __rev2_798;  __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 3, 2, 1, 0); \
+  float32x4_t __ret_798; \
+  __ret_798 = __noswap_vfmlslq_low_f16(__rev0_798, __rev1_798, (float16x8_t) {__noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798)}); \
+  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \
+  __ret_798; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
-  float32x2_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __s2_280 = __p2_280; \
-  float32x2_t __ret_280; \
-  __ret_280 = vfmlsl_low_f16(__s0_280, __s1_280, (float16x4_t) {vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280)}); \
-  __ret_280; \
+#define vfmlsl_lane_low_f16(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \
+  float32x2_t __s0_799 = __p0_799; \
+  float16x4_t __s1_799 = __p1_799; \
+  float16x4_t __s2_799 = __p2_799; \
+  float32x2_t __ret_799; \
+  __ret_799 = vfmlsl_low_f16(__s0_799, __s1_799, (float16x4_t) {vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799)}); \
+  __ret_799; \
 })
 #else
-#define vfmlsl_lane_low_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float32x2_t __s0_281 = __p0_281; \
-  float16x4_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float32x2_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 1, 0); \
-  float16x4_t __rev1_281;  __rev1_281 = __builtin_shufflevector(__s1_281, __s1_281, 3, 2, 1, 0); \
-  float16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
-  float32x2_t __ret_281; \
-  __ret_281 = __noswap_vfmlsl_low_f16(__rev0_281, __rev1_281, (float16x4_t) {__noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281)}); \
-  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 1, 0); \
-  __ret_281; \
+#define vfmlsl_lane_low_f16(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \
+  float32x2_t __s0_800 = __p0_800; \
+  float16x4_t __s1_800 = __p1_800; \
+  float16x4_t __s2_800 = __p2_800; \
+  float32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
+  float16x4_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 3, 2, 1, 0); \
+  float16x4_t __rev2_800;  __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \
+  float32x2_t __ret_800; \
+  __ret_800 = __noswap_vfmlsl_low_f16(__rev0_800, __rev1_800, (float16x4_t) {__noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800)}); \
+  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \
+  __ret_800; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float32x4_t __s0_282 = __p0_282; \
-  float16x8_t __s1_282 = __p1_282; \
-  float16x8_t __s2_282 = __p2_282; \
-  float32x4_t __ret_282; \
-  __ret_282 = vfmlslq_high_f16(__s0_282, __s1_282, (float16x8_t) {vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282)}); \
-  __ret_282; \
+#define vfmlslq_laneq_high_f16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \
+  float32x4_t __s0_801 = __p0_801; \
+  float16x8_t __s1_801 = __p1_801; \
+  float16x8_t __s2_801 = __p2_801; \
+  float32x4_t __ret_801; \
+  __ret_801 = vfmlslq_high_f16(__s0_801, __s1_801, (float16x8_t) {vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801)}); \
+  __ret_801; \
 })
 #else
-#define vfmlslq_laneq_high_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float32x4_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x8_t __s2_283 = __p2_283; \
-  float32x4_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 3, 2, 1, 0); \
-  float16x8_t __rev1_283;  __rev1_283 = __builtin_shufflevector(__s1_283, __s1_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_283; \
-  __ret_283 = __noswap_vfmlslq_high_f16(__rev0_283, __rev1_283, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283)}); \
-  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 3, 2, 1, 0); \
-  __ret_283; \
+#define vfmlslq_laneq_high_f16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \
+  float32x4_t __s0_802 = __p0_802; \
+  float16x8_t __s1_802 = __p1_802; \
+  float16x8_t __s2_802 = __p2_802; \
+  float32x4_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \
+  float16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_802;  __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_802; \
+  __ret_802 = __noswap_vfmlslq_high_f16(__rev0_802, __rev1_802, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802)}); \
+  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \
+  __ret_802; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float32x2_t __s0_284 = __p0_284; \
-  float16x4_t __s1_284 = __p1_284; \
-  float16x8_t __s2_284 = __p2_284; \
-  float32x2_t __ret_284; \
-  __ret_284 = vfmlsl_high_f16(__s0_284, __s1_284, (float16x4_t) {vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284)}); \
-  __ret_284; \
+#define vfmlsl_laneq_high_f16(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \
+  float32x2_t __s0_803 = __p0_803; \
+  float16x4_t __s1_803 = __p1_803; \
+  float16x8_t __s2_803 = __p2_803; \
+  float32x2_t __ret_803; \
+  __ret_803 = vfmlsl_high_f16(__s0_803, __s1_803, (float16x4_t) {vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803)}); \
+  __ret_803; \
 })
 #else
-#define vfmlsl_laneq_high_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float32x2_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x8_t __s2_285 = __p2_285; \
-  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
-  float16x4_t __rev1_285;  __rev1_285 = __builtin_shufflevector(__s1_285, __s1_285, 3, 2, 1, 0); \
-  float16x8_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_285; \
-  __ret_285 = __noswap_vfmlsl_high_f16(__rev0_285, __rev1_285, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285)}); \
-  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
-  __ret_285; \
+#define vfmlsl_laneq_high_f16(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \
+  float32x2_t __s0_804 = __p0_804; \
+  float16x4_t __s1_804 = __p1_804; \
+  float16x8_t __s2_804 = __p2_804; \
+  float32x2_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \
+  float16x4_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 3, 2, 1, 0); \
+  float16x8_t __rev2_804;  __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_804; \
+  __ret_804 = __noswap_vfmlsl_high_f16(__rev0_804, __rev1_804, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804)}); \
+  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 1, 0); \
+  __ret_804; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float32x4_t __s0_286 = __p0_286; \
-  float16x8_t __s1_286 = __p1_286; \
-  float16x8_t __s2_286 = __p2_286; \
-  float32x4_t __ret_286; \
-  __ret_286 = vfmlslq_low_f16(__s0_286, __s1_286, (float16x8_t) {vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286)}); \
-  __ret_286; \
+#define vfmlslq_laneq_low_f16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \
+  float32x4_t __s0_805 = __p0_805; \
+  float16x8_t __s1_805 = __p1_805; \
+  float16x8_t __s2_805 = __p2_805; \
+  float32x4_t __ret_805; \
+  __ret_805 = vfmlslq_low_f16(__s0_805, __s1_805, (float16x8_t) {vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805)}); \
+  __ret_805; \
 })
 #else
-#define vfmlslq_laneq_low_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float32x4_t __s0_287 = __p0_287; \
-  float16x8_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float32x4_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 3, 2, 1, 0); \
-  float16x8_t __rev1_287;  __rev1_287 = __builtin_shufflevector(__s1_287, __s1_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_287; \
-  __ret_287 = __noswap_vfmlslq_low_f16(__rev0_287, __rev1_287, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287)}); \
-  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 3, 2, 1, 0); \
-  __ret_287; \
+#define vfmlslq_laneq_low_f16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \
+  float32x4_t __s0_806 = __p0_806; \
+  float16x8_t __s1_806 = __p1_806; \
+  float16x8_t __s2_806 = __p2_806; \
+  float32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
+  float16x8_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16x8_t __rev2_806;  __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x4_t __ret_806; \
+  __ret_806 = __noswap_vfmlslq_low_f16(__rev0_806, __rev1_806, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806)}); \
+  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 3, 2, 1, 0); \
+  __ret_806; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float32x2_t __s0_288 = __p0_288; \
-  float16x4_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float32x2_t __ret_288; \
-  __ret_288 = vfmlsl_low_f16(__s0_288, __s1_288, (float16x4_t) {vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288)}); \
-  __ret_288; \
+#define vfmlsl_laneq_low_f16(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \
+  float32x2_t __s0_807 = __p0_807; \
+  float16x4_t __s1_807 = __p1_807; \
+  float16x8_t __s2_807 = __p2_807; \
+  float32x2_t __ret_807; \
+  __ret_807 = vfmlsl_low_f16(__s0_807, __s1_807, (float16x4_t) {vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807)}); \
+  __ret_807; \
 })
 #else
-#define vfmlsl_laneq_low_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float32x2_t __s0_289 = __p0_289; \
-  float16x4_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float32x2_t __rev0_289;  __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 1, 0); \
-  float16x4_t __rev1_289;  __rev1_289 = __builtin_shufflevector(__s1_289, __s1_289, 3, 2, 1, 0); \
-  float16x8_t __rev2_289;  __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_289; \
-  __ret_289 = __noswap_vfmlsl_low_f16(__rev0_289, __rev1_289, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289)}); \
-  __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 1, 0); \
-  __ret_289; \
+#define vfmlsl_laneq_low_f16(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \
+  float32x2_t __s0_808 = __p0_808; \
+  float16x4_t __s1_808 = __p1_808; \
+  float16x8_t __s2_808 = __p2_808; \
+  float32x2_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 1, 0); \
+  float16x4_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \
+  float16x8_t __rev2_808;  __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float32x2_t __ret_808; \
+  __ret_808 = __noswap_vfmlsl_low_f16(__rev0_808, __rev1_808, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808)}); \
+  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 1, 0); \
+  __ret_808; \
 })
 #endif
 
 #endif
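+/* Editor's note (illustrative): the vfmlal and vfmlsl macros above wrap the
+ * FP16FML widening multiply-accumulate: f16 products are added to (vfmlal)
+ * or subtracted from (vfmlsl) an f32 accumulator, with _low and _high
+ * selecting which half of the f16 source vectors feeds the products.
+ * E.g., assuming acc is a float32x2_t and a, b are float16x4_t values:
+ *
+ *   acc = vfmlsl_lane_low_f16(acc, a, b, 2);  // acc -= widened a_low * b[2]
+ */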
 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_290, __p1_290, __p2_290) __extension__ ({ \
-  float16_t __s0_290 = __p0_290; \
-  float16x4_t __s1_290 = __p1_290; \
-  float16_t __ret_290; \
-  __ret_290 = __s0_290 * vget_lane_f16(__s1_290, __p2_290); \
-  __ret_290; \
+#define vmulh_lane_f16(__p0_809, __p1_809, __p2_809) __extension__ ({ \
+  float16_t __s0_809 = __p0_809; \
+  float16x4_t __s1_809 = __p1_809; \
+  float16_t __ret_809; \
+  __ret_809 = __s0_809 * vget_lane_f16(__s1_809, __p2_809); \
+  __ret_809; \
 })
 #else
-#define vmulh_lane_f16(__p0_291, __p1_291, __p2_291) __extension__ ({ \
-  float16_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x4_t __rev1_291;  __rev1_291 = __builtin_shufflevector(__s1_291, __s1_291, 3, 2, 1, 0); \
-  float16_t __ret_291; \
-  __ret_291 = __s0_291 * __noswap_vget_lane_f16(__rev1_291, __p2_291); \
-  __ret_291; \
+#define vmulh_lane_f16(__p0_810, __p1_810, __p2_810) __extension__ ({ \
+  float16_t __s0_810 = __p0_810; \
+  float16x4_t __s1_810 = __p1_810; \
+  float16x4_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 3, 2, 1, 0); \
+  float16_t __ret_810; \
+  __ret_810 = __s0_810 * __noswap_vget_lane_f16(__rev1_810, __p2_810); \
+  __ret_810; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_292, __p1_292, __p2_292) __extension__ ({ \
-  float16_t __s0_292 = __p0_292; \
-  float16x8_t __s1_292 = __p1_292; \
-  float16_t __ret_292; \
-  __ret_292 = __s0_292 * vgetq_lane_f16(__s1_292, __p2_292); \
-  __ret_292; \
+#define vmulh_laneq_f16(__p0_811, __p1_811, __p2_811) __extension__ ({ \
+  float16_t __s0_811 = __p0_811; \
+  float16x8_t __s1_811 = __p1_811; \
+  float16_t __ret_811; \
+  __ret_811 = __s0_811 * vgetq_lane_f16(__s1_811, __p2_811); \
+  __ret_811; \
 })
 #else
-#define vmulh_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __rev1_293;  __rev1_293 = __builtin_shufflevector(__s1_293, __s1_293, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_293; \
-  __ret_293 = __s0_293 * __noswap_vgetq_lane_f16(__rev1_293, __p2_293); \
-  __ret_293; \
+#define vmulh_laneq_f16(__p0_812, __p1_812, __p2_812) __extension__ ({ \
+  float16_t __s0_812 = __p0_812; \
+  float16x8_t __s1_812 = __p1_812; \
+  float16x8_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \
+  float16_t __ret_812; \
+  __ret_812 = __s0_812 * __noswap_vgetq_lane_f16(__rev1_812, __p2_812); \
+  __ret_812; \
+})
+#endif
+
+#endif
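+/* Editor's note (illustrative): vmulh_lane_f16 / vmulh_laneq_f16 multiply a
+ * scalar float16_t by one lane of a vector; only the vector operand needs
+ * the big-endian lane reversal, since the scalar operand and the result
+ * have no lanes.  E.g., assuming x is a float16_t and v a float16x8_t:
+ *
+ *   float16_t y = vmulh_laneq_f16(x, v, 5);  // x * v[5]
+ */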
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+#ifdef __LITTLE_ENDIAN__
+#define vsudotq_lane_s32(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \
+  int32x4_t __s0_813 = __p0_813; \
+  int8x16_t __s1_813 = __p1_813; \
+  uint8x8_t __s2_813 = __p2_813; \
+  int32x4_t __ret_813; \
+  uint8x8_t __reint_813 = __s2_813; \
+  __ret_813 = vusdotq_s32(__s0_813, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_813, __p3_813)), __s1_813); \
+  __ret_813; \
+})
+#else
+#define vsudotq_lane_s32(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \
+  int32x4_t __s0_814 = __p0_814; \
+  int8x16_t __s1_814 = __p1_814; \
+  uint8x8_t __s2_814 = __p2_814; \
+  int32x4_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \
+  int8x16_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_814;  __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x4_t __ret_814; \
+  uint8x8_t __reint_814 = __rev2_814; \
+  __ret_814 = __noswap_vusdotq_s32(__rev0_814, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_814, __p3_814)), __rev1_814); \
+  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
+  __ret_814; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsudot_lane_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \
+  int32x2_t __s0_815 = __p0_815; \
+  int8x8_t __s1_815 = __p1_815; \
+  uint8x8_t __s2_815 = __p2_815; \
+  int32x2_t __ret_815; \
+  uint8x8_t __reint_815 = __s2_815; \
+  __ret_815 = vusdot_s32(__s0_815, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_815, __p3_815)), __s1_815); \
+  __ret_815; \
+})
+#else
+#define vsudot_lane_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \
+  int32x2_t __s0_816 = __p0_816; \
+  int8x8_t __s1_816 = __p1_816; \
+  uint8x8_t __s2_816 = __p2_816; \
+  int32x2_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \
+  int8x8_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 7, 6, 5, 4, 3, 2, 1, 0); \
+  uint8x8_t __rev2_816;  __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int32x2_t __ret_816; \
+  uint8x8_t __reint_816 = __rev2_816; \
+  __ret_816 = __noswap_vusdot_s32(__rev0_816, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_816, __p3_816)), __rev1_816); \
+  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \
+  __ret_816; \
 })
 #endif
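+/* Editor's note (illustrative): the vsudotq_lane_s32 / vsudot_lane_s32
+ * macros above express the signed-by-unsigned dot product through the
+ * usdot builtins with the operands swapped: the selected group of four
+ * unsigned bytes is broadcast via splatq_lane_s32 / splat_lane_s32 on a
+ * reinterpreted int32x2_t and passed as the unsigned argument of
+ * vusdotq_s32 / vusdot_s32.  E.g., assuming acc is an int32x2_t, s an
+ * int8x8_t and u a uint8x8_t:
+ *
+ *   acc = vsudot_lane_s32(acc, s, u, 1);
+ */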
 
@@ -61961,86 +66049,86 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
-  int32_t __s0_294 = __p0_294; \
-  int32_t __s1_294 = __p1_294; \
-  int32x2_t __s2_294 = __p2_294; \
-  int32_t __ret_294; \
-  __ret_294 = vqadds_s32(__s0_294, vqrdmulhs_s32(__s1_294, vget_lane_s32(__s2_294, __p3_294))); \
-  __ret_294; \
+#define vqrdmlahs_lane_s32(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \
+  int32_t __s0_817 = __p0_817; \
+  int32_t __s1_817 = __p1_817; \
+  int32x2_t __s2_817 = __p2_817; \
+  int32_t __ret_817; \
+  __ret_817 = vqadds_s32(__s0_817, vqrdmulhs_s32(__s1_817, vget_lane_s32(__s2_817, __p3_817))); \
+  __ret_817; \
 })
 #else
-#define vqrdmlahs_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
-  int32_t __s0_295 = __p0_295; \
-  int32_t __s1_295 = __p1_295; \
-  int32x2_t __s2_295 = __p2_295; \
-  int32x2_t __rev2_295;  __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \
-  int32_t __ret_295; \
-  __ret_295 = vqadds_s32(__s0_295, vqrdmulhs_s32(__s1_295, __noswap_vget_lane_s32(__rev2_295, __p3_295))); \
-  __ret_295; \
+#define vqrdmlahs_lane_s32(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \
+  int32_t __s0_818 = __p0_818; \
+  int32_t __s1_818 = __p1_818; \
+  int32x2_t __s2_818 = __p2_818; \
+  int32x2_t __rev2_818;  __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 1, 0); \
+  int32_t __ret_818; \
+  __ret_818 = vqadds_s32(__s0_818, vqrdmulhs_s32(__s1_818, __noswap_vget_lane_s32(__rev2_818, __p3_818))); \
+  __ret_818; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
-  int16_t __s0_296 = __p0_296; \
-  int16_t __s1_296 = __p1_296; \
-  int16x4_t __s2_296 = __p2_296; \
-  int16_t __ret_296; \
-  __ret_296 = vqaddh_s16(__s0_296, vqrdmulhh_s16(__s1_296, vget_lane_s16(__s2_296, __p3_296))); \
-  __ret_296; \
+#define vqrdmlahh_lane_s16(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \
+  int16_t __s0_819 = __p0_819; \
+  int16_t __s1_819 = __p1_819; \
+  int16x4_t __s2_819 = __p2_819; \
+  int16_t __ret_819; \
+  __ret_819 = vqaddh_s16(__s0_819, vqrdmulhh_s16(__s1_819, vget_lane_s16(__s2_819, __p3_819))); \
+  __ret_819; \
 })
 #else
-#define vqrdmlahh_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
-  int16_t __s0_297 = __p0_297; \
-  int16_t __s1_297 = __p1_297; \
-  int16x4_t __s2_297 = __p2_297; \
-  int16x4_t __rev2_297;  __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, 3, 2, 1, 0); \
-  int16_t __ret_297; \
-  __ret_297 = vqaddh_s16(__s0_297, vqrdmulhh_s16(__s1_297, __noswap_vget_lane_s16(__rev2_297, __p3_297))); \
-  __ret_297; \
+#define vqrdmlahh_lane_s16(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \
+  int16_t __s0_820 = __p0_820; \
+  int16_t __s1_820 = __p1_820; \
+  int16x4_t __s2_820 = __p2_820; \
+  int16x4_t __rev2_820;  __rev2_820 = __builtin_shufflevector(__s2_820, __s2_820, 3, 2, 1, 0); \
+  int16_t __ret_820; \
+  __ret_820 = vqaddh_s16(__s0_820, vqrdmulhh_s16(__s1_820, __noswap_vget_lane_s16(__rev2_820, __p3_820))); \
+  __ret_820; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
-  int32_t __s0_298 = __p0_298; \
-  int32_t __s1_298 = __p1_298; \
-  int32x4_t __s2_298 = __p2_298; \
-  int32_t __ret_298; \
-  __ret_298 = vqadds_s32(__s0_298, vqrdmulhs_s32(__s1_298, vgetq_lane_s32(__s2_298, __p3_298))); \
-  __ret_298; \
+#define vqrdmlahs_laneq_s32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \
+  int32_t __s0_821 = __p0_821; \
+  int32_t __s1_821 = __p1_821; \
+  int32x4_t __s2_821 = __p2_821; \
+  int32_t __ret_821; \
+  __ret_821 = vqadds_s32(__s0_821, vqrdmulhs_s32(__s1_821, vgetq_lane_s32(__s2_821, __p3_821))); \
+  __ret_821; \
 })
 #else
-#define vqrdmlahs_laneq_s32(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
-  int32_t __s0_299 = __p0_299; \
-  int32_t __s1_299 = __p1_299; \
-  int32x4_t __s2_299 = __p2_299; \
-  int32x4_t __rev2_299;  __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \
-  int32_t __ret_299; \
-  __ret_299 = vqadds_s32(__s0_299, vqrdmulhs_s32(__s1_299, __noswap_vgetq_lane_s32(__rev2_299, __p3_299))); \
-  __ret_299; \
+#define vqrdmlahs_laneq_s32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \
+  int32_t __s0_822 = __p0_822; \
+  int32_t __s1_822 = __p1_822; \
+  int32x4_t __s2_822 = __p2_822; \
+  int32x4_t __rev2_822;  __rev2_822 = __builtin_shufflevector(__s2_822, __s2_822, 3, 2, 1, 0); \
+  int32_t __ret_822; \
+  __ret_822 = vqadds_s32(__s0_822, vqrdmulhs_s32(__s1_822, __noswap_vgetq_lane_s32(__rev2_822, __p3_822))); \
+  __ret_822; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
-  int16_t __s0_300 = __p0_300; \
-  int16_t __s1_300 = __p1_300; \
-  int16x8_t __s2_300 = __p2_300; \
-  int16_t __ret_300; \
-  __ret_300 = vqaddh_s16(__s0_300, vqrdmulhh_s16(__s1_300, vgetq_lane_s16(__s2_300, __p3_300))); \
-  __ret_300; \
+#define vqrdmlahh_laneq_s16(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \
+  int16_t __s0_823 = __p0_823; \
+  int16_t __s1_823 = __p1_823; \
+  int16x8_t __s2_823 = __p2_823; \
+  int16_t __ret_823; \
+  __ret_823 = vqaddh_s16(__s0_823, vqrdmulhh_s16(__s1_823, vgetq_lane_s16(__s2_823, __p3_823))); \
+  __ret_823; \
 })
 #else
-#define vqrdmlahh_laneq_s16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
-  int16_t __s0_301 = __p0_301; \
-  int16_t __s1_301 = __p1_301; \
-  int16x8_t __s2_301 = __p2_301; \
-  int16x8_t __rev2_301;  __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_301; \
-  __ret_301 = vqaddh_s16(__s0_301, vqrdmulhh_s16(__s1_301, __noswap_vgetq_lane_s16(__rev2_301, __p3_301))); \
-  __ret_301; \
+#define vqrdmlahh_laneq_s16(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \
+  int16_t __s0_824 = __p0_824; \
+  int16_t __s1_824 = __p1_824; \
+  int16x8_t __s2_824 = __p2_824; \
+  int16x8_t __rev2_824;  __rev2_824 = __builtin_shufflevector(__s2_824, __s2_824, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_824; \
+  __ret_824 = vqaddh_s16(__s0_824, vqrdmulhh_s16(__s1_824, __noswap_vgetq_lane_s16(__rev2_824, __p3_824))); \
+  __ret_824; \
 })
 #endif
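
These vqrdmlah*_lane* forms are the scalar saturating rounding doubling multiply-accumulate intrinsics from ARMv8.1 (the QRDMLAH/RDMA extension): the rounded high half of the doubled product is added to the accumulator, saturating at each step, which the header expresses as vqadd of a vqrdmulh. A minimal usage sketch, assuming -march=armv8.1-a+rdma (mac_high_half is an illustrative name):

#include <arm_neon.h>

int16_t mac_high_half(int16_t acc, int16_t x, int16x4_t coeffs) {
    /* roughly: sat(acc + sat((2 * x * coeffs[1] + (1 << 15)) >> 16)) */
    return vqrdmlahh_lane_s16(acc, x, coeffs, 1);
}
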
 
@@ -62055,86 +66143,86 @@
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
-  int32_t __s0_302 = __p0_302; \
-  int32_t __s1_302 = __p1_302; \
-  int32x2_t __s2_302 = __p2_302; \
-  int32_t __ret_302; \
-  __ret_302 = vqsubs_s32(__s0_302, vqrdmulhs_s32(__s1_302, vget_lane_s32(__s2_302, __p3_302))); \
-  __ret_302; \
+#define vqrdmlshs_lane_s32(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \
+  int32_t __s0_825 = __p0_825; \
+  int32_t __s1_825 = __p1_825; \
+  int32x2_t __s2_825 = __p2_825; \
+  int32_t __ret_825; \
+  __ret_825 = vqsubs_s32(__s0_825, vqrdmulhs_s32(__s1_825, vget_lane_s32(__s2_825, __p3_825))); \
+  __ret_825; \
 })
 #else
-#define vqrdmlshs_lane_s32(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
-  int32_t __s0_303 = __p0_303; \
-  int32_t __s1_303 = __p1_303; \
-  int32x2_t __s2_303 = __p2_303; \
-  int32x2_t __rev2_303;  __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 1, 0); \
-  int32_t __ret_303; \
-  __ret_303 = vqsubs_s32(__s0_303, vqrdmulhs_s32(__s1_303, __noswap_vget_lane_s32(__rev2_303, __p3_303))); \
-  __ret_303; \
+#define vqrdmlshs_lane_s32(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \
+  int32_t __s0_826 = __p0_826; \
+  int32_t __s1_826 = __p1_826; \
+  int32x2_t __s2_826 = __p2_826; \
+  int32x2_t __rev2_826;  __rev2_826 = __builtin_shufflevector(__s2_826, __s2_826, 1, 0); \
+  int32_t __ret_826; \
+  __ret_826 = vqsubs_s32(__s0_826, vqrdmulhs_s32(__s1_826, __noswap_vget_lane_s32(__rev2_826, __p3_826))); \
+  __ret_826; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
-  int16_t __s0_304 = __p0_304; \
-  int16_t __s1_304 = __p1_304; \
-  int16x4_t __s2_304 = __p2_304; \
-  int16_t __ret_304; \
-  __ret_304 = vqsubh_s16(__s0_304, vqrdmulhh_s16(__s1_304, vget_lane_s16(__s2_304, __p3_304))); \
-  __ret_304; \
+#define vqrdmlshh_lane_s16(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
+  int16_t __s0_827 = __p0_827; \
+  int16_t __s1_827 = __p1_827; \
+  int16x4_t __s2_827 = __p2_827; \
+  int16_t __ret_827; \
+  __ret_827 = vqsubh_s16(__s0_827, vqrdmulhh_s16(__s1_827, vget_lane_s16(__s2_827, __p3_827))); \
+  __ret_827; \
 })
 #else
-#define vqrdmlshh_lane_s16(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int16_t __s0_305 = __p0_305; \
-  int16_t __s1_305 = __p1_305; \
-  int16x4_t __s2_305 = __p2_305; \
-  int16x4_t __rev2_305;  __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 3, 2, 1, 0); \
-  int16_t __ret_305; \
-  __ret_305 = vqsubh_s16(__s0_305, vqrdmulhh_s16(__s1_305, __noswap_vget_lane_s16(__rev2_305, __p3_305))); \
-  __ret_305; \
+#define vqrdmlshh_lane_s16(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
+  int16_t __s0_828 = __p0_828; \
+  int16_t __s1_828 = __p1_828; \
+  int16x4_t __s2_828 = __p2_828; \
+  int16x4_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 3, 2, 1, 0); \
+  int16_t __ret_828; \
+  __ret_828 = vqsubh_s16(__s0_828, vqrdmulhh_s16(__s1_828, __noswap_vget_lane_s16(__rev2_828, __p3_828))); \
+  __ret_828; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32_t __s0_306 = __p0_306; \
-  int32_t __s1_306 = __p1_306; \
-  int32x4_t __s2_306 = __p2_306; \
-  int32_t __ret_306; \
-  __ret_306 = vqsubs_s32(__s0_306, vqrdmulhs_s32(__s1_306, vgetq_lane_s32(__s2_306, __p3_306))); \
-  __ret_306; \
+#define vqrdmlshs_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
+  int32_t __s0_829 = __p0_829; \
+  int32_t __s1_829 = __p1_829; \
+  int32x4_t __s2_829 = __p2_829; \
+  int32_t __ret_829; \
+  __ret_829 = vqsubs_s32(__s0_829, vqrdmulhs_s32(__s1_829, vgetq_lane_s32(__s2_829, __p3_829))); \
+  __ret_829; \
 })
 #else
-#define vqrdmlshs_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32_t __s0_307 = __p0_307; \
-  int32_t __s1_307 = __p1_307; \
-  int32x4_t __s2_307 = __p2_307; \
-  int32x4_t __rev2_307;  __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 3, 2, 1, 0); \
-  int32_t __ret_307; \
-  __ret_307 = vqsubs_s32(__s0_307, vqrdmulhs_s32(__s1_307, __noswap_vgetq_lane_s32(__rev2_307, __p3_307))); \
-  __ret_307; \
+#define vqrdmlshs_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
+  int32_t __s0_830 = __p0_830; \
+  int32_t __s1_830 = __p1_830; \
+  int32x4_t __s2_830 = __p2_830; \
+  int32x4_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 3, 2, 1, 0); \
+  int32_t __ret_830; \
+  __ret_830 = vqsubs_s32(__s0_830, vqrdmulhs_s32(__s1_830, __noswap_vgetq_lane_s32(__rev2_830, __p3_830))); \
+  __ret_830; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int16_t __s0_308 = __p0_308; \
-  int16_t __s1_308 = __p1_308; \
-  int16x8_t __s2_308 = __p2_308; \
-  int16_t __ret_308; \
-  __ret_308 = vqsubh_s16(__s0_308, vqrdmulhh_s16(__s1_308, vgetq_lane_s16(__s2_308, __p3_308))); \
-  __ret_308; \
+#define vqrdmlshh_laneq_s16(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
+  int16_t __s0_831 = __p0_831; \
+  int16_t __s1_831 = __p1_831; \
+  int16x8_t __s2_831 = __p2_831; \
+  int16_t __ret_831; \
+  __ret_831 = vqsubh_s16(__s0_831, vqrdmulhh_s16(__s1_831, vgetq_lane_s16(__s2_831, __p3_831))); \
+  __ret_831; \
 })
 #else
-#define vqrdmlshh_laneq_s16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int16_t __s0_309 = __p0_309; \
-  int16_t __s1_309 = __p1_309; \
-  int16x8_t __s2_309 = __p2_309; \
-  int16x8_t __rev2_309;  __rev2_309 = __builtin_shufflevector(__s2_309, __s2_309, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_309; \
-  __ret_309 = vqsubh_s16(__s0_309, vqrdmulhh_s16(__s1_309, __noswap_vgetq_lane_s16(__rev2_309, __p3_309))); \
-  __ret_309; \
+#define vqrdmlshh_laneq_s16(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
+  int16_t __s0_832 = __p0_832; \
+  int16_t __s1_832 = __p1_832; \
+  int16x8_t __s2_832 = __p2_832; \
+  int16x8_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 7, 6, 5, 4, 3, 2, 1, 0); \
+  int16_t __ret_832; \
+  __ret_832 = vqsubh_s16(__s0_832, vqrdmulhh_s16(__s1_832, __noswap_vgetq_lane_s16(__rev2_832, __p3_832))); \
+  __ret_832; \
 })
 #endif
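
The #else branches of all of these macros share one shape: on big-endian targets the header keeps vector values in reversed element order, so each macro shuffles its inputs into little-endian lane order with __builtin_shufflevector, calls a __noswap_ helper that performs the raw operation without re-reversing, and shuffles any vector result back. A hypothetical sketch of that pattern (EXAMPLE_LANE_S32 is invented for illustration; __noswap_vgetq_lane_s32 is one of the header's generated helpers):

#define EXAMPLE_LANE_S32(__vec, __lane) __extension__ ({                   \
  int32x4_t __rev = __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0);     \
  int32_t __r = __noswap_vgetq_lane_s32(__rev, __lane); /* raw lane read */ \
  __r;                                                                     \
})
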
 
@@ -62447,136 +66535,136 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  poly64x2_t __s0_310 = __p0_310; \
-  poly64x1_t __s2_310 = __p2_310; \
-  poly64x2_t __ret_310; \
-  __ret_310 = vsetq_lane_p64(vget_lane_p64(__s2_310, __p3_310), __s0_310, __p1_310); \
-  __ret_310; \
+#define vcopyq_lane_p64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
+  poly64x2_t __s0_833 = __p0_833; \
+  poly64x1_t __s2_833 = __p2_833; \
+  poly64x2_t __ret_833; \
+  __ret_833 = vsetq_lane_p64(vget_lane_p64(__s2_833, __p3_833), __s0_833, __p1_833); \
+  __ret_833; \
 })
 #else
-#define vcopyq_lane_p64(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  poly64x2_t __s0_311 = __p0_311; \
-  poly64x1_t __s2_311 = __p2_311; \
-  poly64x2_t __rev0_311;  __rev0_311 = __builtin_shufflevector(__s0_311, __s0_311, 1, 0); \
-  poly64x2_t __ret_311; \
-  __ret_311 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_311, __p3_311), __rev0_311, __p1_311); \
-  __ret_311 = __builtin_shufflevector(__ret_311, __ret_311, 1, 0); \
-  __ret_311; \
+#define vcopyq_lane_p64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
+  poly64x2_t __s0_834 = __p0_834; \
+  poly64x1_t __s2_834 = __p2_834; \
+  poly64x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
+  poly64x2_t __ret_834; \
+  __ret_834 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_834, __p3_834), __rev0_834, __p1_834); \
+  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
+  __ret_834; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  float64x2_t __s0_312 = __p0_312; \
-  float64x1_t __s2_312 = __p2_312; \
-  float64x2_t __ret_312; \
-  __ret_312 = vsetq_lane_f64(vget_lane_f64(__s2_312, __p3_312), __s0_312, __p1_312); \
-  __ret_312; \
+#define vcopyq_lane_f64(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \
+  float64x2_t __s0_835 = __p0_835; \
+  float64x1_t __s2_835 = __p2_835; \
+  float64x2_t __ret_835; \
+  __ret_835 = vsetq_lane_f64(vget_lane_f64(__s2_835, __p3_835), __s0_835, __p1_835); \
+  __ret_835; \
 })
 #else
-#define vcopyq_lane_f64(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  float64x2_t __s0_313 = __p0_313; \
-  float64x1_t __s2_313 = __p2_313; \
-  float64x2_t __rev0_313;  __rev0_313 = __builtin_shufflevector(__s0_313, __s0_313, 1, 0); \
-  float64x2_t __ret_313; \
-  __ret_313 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_313, __p3_313), __rev0_313, __p1_313); \
-  __ret_313 = __builtin_shufflevector(__ret_313, __ret_313, 1, 0); \
-  __ret_313; \
+#define vcopyq_lane_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \
+  float64x2_t __s0_836 = __p0_836; \
+  float64x1_t __s2_836 = __p2_836; \
+  float64x2_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 1, 0); \
+  float64x2_t __ret_836; \
+  __ret_836 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_836, __p3_836), __rev0_836, __p1_836); \
+  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 1, 0); \
+  __ret_836; \
 })
 #endif
 
-#define vcopy_lane_p64(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  poly64x1_t __s0_314 = __p0_314; \
-  poly64x1_t __s2_314 = __p2_314; \
-  poly64x1_t __ret_314; \
-  __ret_314 = vset_lane_p64(vget_lane_p64(__s2_314, __p3_314), __s0_314, __p1_314); \
-  __ret_314; \
+#define vcopy_lane_p64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \
+  poly64x1_t __s0_837 = __p0_837; \
+  poly64x1_t __s2_837 = __p2_837; \
+  poly64x1_t __ret_837; \
+  __ret_837 = vset_lane_p64(vget_lane_p64(__s2_837, __p3_837), __s0_837, __p1_837); \
+  __ret_837; \
 })
-#define vcopy_lane_f64(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  float64x1_t __s0_315 = __p0_315; \
-  float64x1_t __s2_315 = __p2_315; \
-  float64x1_t __ret_315; \
-  __ret_315 = vset_lane_f64(vget_lane_f64(__s2_315, __p3_315), __s0_315, __p1_315); \
-  __ret_315; \
+#define vcopy_lane_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \
+  float64x1_t __s0_838 = __p0_838; \
+  float64x1_t __s2_838 = __p2_838; \
+  float64x1_t __ret_838; \
+  __ret_838 = vset_lane_f64(vget_lane_f64(__s2_838, __p3_838), __s0_838, __p1_838); \
+  __ret_838; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  poly64x2_t __s0_316 = __p0_316; \
-  poly64x2_t __s2_316 = __p2_316; \
-  poly64x2_t __ret_316; \
-  __ret_316 = vsetq_lane_p64(vgetq_lane_p64(__s2_316, __p3_316), __s0_316, __p1_316); \
-  __ret_316; \
+#define vcopyq_laneq_p64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \
+  poly64x2_t __s0_839 = __p0_839; \
+  poly64x2_t __s2_839 = __p2_839; \
+  poly64x2_t __ret_839; \
+  __ret_839 = vsetq_lane_p64(vgetq_lane_p64(__s2_839, __p3_839), __s0_839, __p1_839); \
+  __ret_839; \
 })
 #else
-#define vcopyq_laneq_p64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  poly64x2_t __s0_317 = __p0_317; \
-  poly64x2_t __s2_317 = __p2_317; \
-  poly64x2_t __rev0_317;  __rev0_317 = __builtin_shufflevector(__s0_317, __s0_317, 1, 0); \
-  poly64x2_t __rev2_317;  __rev2_317 = __builtin_shufflevector(__s2_317, __s2_317, 1, 0); \
-  poly64x2_t __ret_317; \
-  __ret_317 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_317, __p3_317), __rev0_317, __p1_317); \
-  __ret_317 = __builtin_shufflevector(__ret_317, __ret_317, 1, 0); \
-  __ret_317; \
+#define vcopyq_laneq_p64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \
+  poly64x2_t __s0_840 = __p0_840; \
+  poly64x2_t __s2_840 = __p2_840; \
+  poly64x2_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \
+  poly64x2_t __rev2_840;  __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 1, 0); \
+  poly64x2_t __ret_840; \
+  __ret_840 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_840, __p3_840), __rev0_840, __p1_840); \
+  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 1, 0); \
+  __ret_840; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  float64x2_t __s0_318 = __p0_318; \
-  float64x2_t __s2_318 = __p2_318; \
-  float64x2_t __ret_318; \
-  __ret_318 = vsetq_lane_f64(vgetq_lane_f64(__s2_318, __p3_318), __s0_318, __p1_318); \
-  __ret_318; \
+#define vcopyq_laneq_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
+  float64x2_t __s0_841 = __p0_841; \
+  float64x2_t __s2_841 = __p2_841; \
+  float64x2_t __ret_841; \
+  __ret_841 = vsetq_lane_f64(vgetq_lane_f64(__s2_841, __p3_841), __s0_841, __p1_841); \
+  __ret_841; \
 })
 #else
-#define vcopyq_laneq_f64(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  float64x2_t __s0_319 = __p0_319; \
-  float64x2_t __s2_319 = __p2_319; \
-  float64x2_t __rev0_319;  __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 1, 0); \
-  float64x2_t __rev2_319;  __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 1, 0); \
-  float64x2_t __ret_319; \
-  __ret_319 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_319, __p3_319), __rev0_319, __p1_319); \
-  __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 1, 0); \
-  __ret_319; \
+#define vcopyq_laneq_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
+  float64x2_t __s0_842 = __p0_842; \
+  float64x2_t __s2_842 = __p2_842; \
+  float64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
+  float64x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
+  float64x2_t __ret_842; \
+  __ret_842 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_842, __p3_842), __rev0_842, __p1_842); \
+  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
+  __ret_842; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  poly64x1_t __s0_320 = __p0_320; \
-  poly64x2_t __s2_320 = __p2_320; \
-  poly64x1_t __ret_320; \
-  __ret_320 = vset_lane_p64(vgetq_lane_p64(__s2_320, __p3_320), __s0_320, __p1_320); \
-  __ret_320; \
+#define vcopy_laneq_p64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
+  poly64x1_t __s0_843 = __p0_843; \
+  poly64x2_t __s2_843 = __p2_843; \
+  poly64x1_t __ret_843; \
+  __ret_843 = vset_lane_p64(vgetq_lane_p64(__s2_843, __p3_843), __s0_843, __p1_843); \
+  __ret_843; \
 })
 #else
-#define vcopy_laneq_p64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  poly64x1_t __s0_321 = __p0_321; \
-  poly64x2_t __s2_321 = __p2_321; \
-  poly64x2_t __rev2_321;  __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 1, 0); \
-  poly64x1_t __ret_321; \
-  __ret_321 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_321, __p3_321), __s0_321, __p1_321); \
-  __ret_321; \
+#define vcopy_laneq_p64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
+  poly64x1_t __s0_844 = __p0_844; \
+  poly64x2_t __s2_844 = __p2_844; \
+  poly64x2_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \
+  poly64x1_t __ret_844; \
+  __ret_844 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_844, __p3_844), __s0_844, __p1_844); \
+  __ret_844; \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  float64x1_t __s0_322 = __p0_322; \
-  float64x2_t __s2_322 = __p2_322; \
-  float64x1_t __ret_322; \
-  __ret_322 = vset_lane_f64(vgetq_lane_f64(__s2_322, __p3_322), __s0_322, __p1_322); \
-  __ret_322; \
+#define vcopy_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
+  float64x1_t __s0_845 = __p0_845; \
+  float64x2_t __s2_845 = __p2_845; \
+  float64x1_t __ret_845; \
+  __ret_845 = vset_lane_f64(vgetq_lane_f64(__s2_845, __p3_845), __s0_845, __p1_845); \
+  __ret_845; \
 })
 #else
-#define vcopy_laneq_f64(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  float64x1_t __s0_323 = __p0_323; \
-  float64x2_t __s2_323 = __p2_323; \
-  float64x2_t __rev2_323;  __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 1, 0); \
-  float64x1_t __ret_323; \
-  __ret_323 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_323, __p3_323), __s0_323, __p1_323); \
-  __ret_323; \
+#define vcopy_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
+  float64x1_t __s0_846 = __p0_846; \
+  float64x2_t __s2_846 = __p2_846; \
+  float64x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
+  float64x1_t __ret_846; \
+  __ret_846 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_846, __p3_846), __s0_846, __p1_846); \
+  __ret_846; \
 })
 #endif
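
The vcopy*_lane* family above inserts one lane of a source vector into one lane of a destination, and the header builds each variant from the matching get-lane/set-lane pair. A minimal usage sketch (replace_high is an illustrative name; AArch64 only):

#include <arm_neon.h>

float64x2_t replace_high(float64x2_t dst, float64x1_t src) {
    /* dst[1] = src[0]; expands to vsetq_lane_f64(vget_lane_f64(src, 0), dst, 1) */
    return vcopyq_lane_f64(dst, 1, src, 0);
}
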
 
@@ -62932,38 +67020,38 @@
 }
 #endif
 
-#define vmulx_lane_f64(__p0_324, __p1_324, __p2_324) __extension__ ({ \
-  float64x1_t __s0_324 = __p0_324; \
-  float64x1_t __s1_324 = __p1_324; \
-  float64x1_t __ret_324; \
-  float64_t __x_324 = vget_lane_f64(__s0_324, 0); \
-  float64_t __y_324 = vget_lane_f64(__s1_324, __p2_324); \
-  float64_t __z_324 = vmulxd_f64(__x_324, __y_324); \
-  __ret_324 = vset_lane_f64(__z_324, __s0_324, __p2_324); \
-  __ret_324; \
+#define vmulx_lane_f64(__p0_847, __p1_847, __p2_847) __extension__ ({ \
+  float64x1_t __s0_847 = __p0_847; \
+  float64x1_t __s1_847 = __p1_847; \
+  float64x1_t __ret_847; \
+  float64_t __x_847 = vget_lane_f64(__s0_847, 0); \
+  float64_t __y_847 = vget_lane_f64(__s1_847, __p2_847); \
+  float64_t __z_847 = vmulxd_f64(__x_847, __y_847); \
+  __ret_847 = vset_lane_f64(__z_847, __s0_847, __p2_847); \
+  __ret_847; \
 })
 #ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_325, __p1_325, __p2_325) __extension__ ({ \
-  float64x1_t __s0_325 = __p0_325; \
-  float64x2_t __s1_325 = __p1_325; \
-  float64x1_t __ret_325; \
-  float64_t __x_325 = vget_lane_f64(__s0_325, 0); \
-  float64_t __y_325 = vgetq_lane_f64(__s1_325, __p2_325); \
-  float64_t __z_325 = vmulxd_f64(__x_325, __y_325); \
-  __ret_325 = vset_lane_f64(__z_325, __s0_325, 0); \
-  __ret_325; \
+#define vmulx_laneq_f64(__p0_848, __p1_848, __p2_848) __extension__ ({ \
+  float64x1_t __s0_848 = __p0_848; \
+  float64x2_t __s1_848 = __p1_848; \
+  float64x1_t __ret_848; \
+  float64_t __x_848 = vget_lane_f64(__s0_848, 0); \
+  float64_t __y_848 = vgetq_lane_f64(__s1_848, __p2_848); \
+  float64_t __z_848 = vmulxd_f64(__x_848, __y_848); \
+  __ret_848 = vset_lane_f64(__z_848, __s0_848, 0); \
+  __ret_848; \
 })
 #else
-#define vmulx_laneq_f64(__p0_326, __p1_326, __p2_326) __extension__ ({ \
-  float64x1_t __s0_326 = __p0_326; \
-  float64x2_t __s1_326 = __p1_326; \
-  float64x2_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 1, 0); \
-  float64x1_t __ret_326; \
-  float64_t __x_326 = vget_lane_f64(__s0_326, 0); \
-  float64_t __y_326 = __noswap_vgetq_lane_f64(__rev1_326, __p2_326); \
-  float64_t __z_326 = vmulxd_f64(__x_326, __y_326); \
-  __ret_326 = vset_lane_f64(__z_326, __s0_326, 0); \
-  __ret_326; \
+#define vmulx_laneq_f64(__p0_849, __p1_849, __p2_849) __extension__ ({ \
+  float64x1_t __s0_849 = __p0_849; \
+  float64x2_t __s1_849 = __p1_849; \
+  float64x2_t __rev1_849;  __rev1_849 = __builtin_shufflevector(__s1_849, __s1_849, 1, 0); \
+  float64x1_t __ret_849; \
+  float64_t __x_849 = vget_lane_f64(__s0_849, 0); \
+  float64_t __y_849 = __noswap_vgetq_lane_f64(__rev1_849, __p2_849); \
+  float64_t __z_849 = vmulxd_f64(__x_849, __y_849); \
+  __ret_849 = vset_lane_f64(__z_849, __s0_849, 0); \
+  __ret_849; \
 })
 #endif
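
vmulx maps to the FMULX instruction: it behaves like an ordinary floating-point multiply except that (+/-)0 times (+/-)infinity yields (+/-)2.0 instead of NaN, which is useful in reciprocal-estimate refinement sequences. A minimal usage sketch (mulx_by_high is an illustrative name; AArch64 only):

#include <arm_neon.h>

float64x1_t mulx_by_high(float64x1_t a, float64x2_t b) {
    return vmulx_laneq_f64(a, b, 1);   /* a[0] * b[1] with FMULX special cases */
}
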
 
@@ -63219,4 +67307,6 @@
 
 #undef __ai
 
+#endif /* if !defined(__ARM_NEON) */
+#endif /* ifndef __ARM_FP */
 #endif /* __ARM_NEON_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_sve.h b/linux-x86/lib64/clang/11.0.5/include/arm_sve.h
new file mode 100644
index 0000000..1035d41
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/arm_sve.h
@@ -0,0 +1,18148 @@
+/*===---- arm_sve.h - ARM SVE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_SVE_H
+#define __ARM_SVE_H
+
+#if !defined(__ARM_FEATURE_SVE)
+#error "SVE support not enabled"
+#else
+
+#if !defined(__LITTLE_ENDIAN__)
+#error "Big endian is currently not supported for arm_sve.h"
+#endif
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#else
+#include <stdbool.h>
+#endif
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+typedef __SVInt8_t svint8_t;
+typedef __SVInt16_t svint16_t;
+typedef __SVInt32_t svint32_t;
+typedef __SVInt64_t svint64_t;
+typedef __SVUint8_t svuint8_t;
+typedef __SVUint16_t svuint16_t;
+typedef __SVUint32_t svuint32_t;
+typedef __SVUint64_t svuint64_t;
+typedef __SVFloat16_t svfloat16_t;
+
+#if defined(__ARM_FEATURE_SVE_BF16) && !defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+#error "__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when __ARM_FEATURE_SVE_BF16 is defined"
+#endif
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+typedef __SVBFloat16_t svbfloat16_t;
+#endif
+
+#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+#include <arm_bf16.h>
+typedef __bf16 bfloat16_t;
+#endif
+
+typedef __SVFloat32_t svfloat32_t;
+typedef __SVFloat64_t svfloat64_t;
+typedef __clang_svint8x2_t svint8x2_t;
+typedef __clang_svint16x2_t svint16x2_t;
+typedef __clang_svint32x2_t svint32x2_t;
+typedef __clang_svint64x2_t svint64x2_t;
+typedef __clang_svuint8x2_t svuint8x2_t;
+typedef __clang_svuint16x2_t svuint16x2_t;
+typedef __clang_svuint32x2_t svuint32x2_t;
+typedef __clang_svuint64x2_t svuint64x2_t;
+typedef __clang_svfloat16x2_t svfloat16x2_t;
+typedef __clang_svfloat32x2_t svfloat32x2_t;
+typedef __clang_svfloat64x2_t svfloat64x2_t;
+typedef __clang_svint8x3_t svint8x3_t;
+typedef __clang_svint16x3_t svint16x3_t;
+typedef __clang_svint32x3_t svint32x3_t;
+typedef __clang_svint64x3_t svint64x3_t;
+typedef __clang_svuint8x3_t svuint8x3_t;
+typedef __clang_svuint16x3_t svuint16x3_t;
+typedef __clang_svuint32x3_t svuint32x3_t;
+typedef __clang_svuint64x3_t svuint64x3_t;
+typedef __clang_svfloat16x3_t svfloat16x3_t;
+typedef __clang_svfloat32x3_t svfloat32x3_t;
+typedef __clang_svfloat64x3_t svfloat64x3_t;
+typedef __clang_svint8x4_t svint8x4_t;
+typedef __clang_svint16x4_t svint16x4_t;
+typedef __clang_svint32x4_t svint32x4_t;
+typedef __clang_svint64x4_t svint64x4_t;
+typedef __clang_svuint8x4_t svuint8x4_t;
+typedef __clang_svuint16x4_t svuint16x4_t;
+typedef __clang_svuint32x4_t svuint32x4_t;
+typedef __clang_svuint64x4_t svuint64x4_t;
+typedef __clang_svfloat16x4_t svfloat16x4_t;
+typedef __clang_svfloat32x4_t svfloat32x4_t;
+typedef __clang_svfloat64x4_t svfloat64x4_t;
+typedef __SVBool_t svbool_t;
+
+#ifdef __ARM_FEATURE_SVE_BF16
+typedef __clang_svbfloat16x2_t svbfloat16x2_t;
+typedef __clang_svbfloat16x3_t svbfloat16x3_t;
+typedef __clang_svbfloat16x4_t svbfloat16x4_t;
+#endif
+typedef enum
+{
+  SV_POW2 = 0,
+  SV_VL1 = 1,
+  SV_VL2 = 2,
+  SV_VL3 = 3,
+  SV_VL4 = 4,
+  SV_VL5 = 5,
+  SV_VL6 = 6,
+  SV_VL7 = 7,
+  SV_VL8 = 8,
+  SV_VL16 = 9,
+  SV_VL32 = 10,
+  SV_VL64 = 11,
+  SV_VL128 = 12,
+  SV_VL256 = 13,
+  SV_MUL4 = 29,
+  SV_MUL3 = 30,
+  SV_ALL = 31
+} sv_pattern;
+
+typedef enum
+{
+  SV_PLDL1KEEP = 0,
+  SV_PLDL1STRM = 1,
+  SV_PLDL2KEEP = 2,
+  SV_PLDL2STRM = 3,
+  SV_PLDL3KEEP = 4,
+  SV_PLDL3STRM = 5,
+  SV_PSTL1KEEP = 8,
+  SV_PSTL1STRM = 9,
+  SV_PSTL2KEEP = 10,
+  SV_PSTL2STRM = 11,
+  SV_PSTL3KEEP = 12,
+  SV_PSTL3STRM = 13
+} sv_prfop;
+
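
sv_pattern constrains how many elements a predicate-setting or element-counting operation covers (SV_ALL selects the full vector length, SV_POW2 the largest power of two that fits), while sv_prfop selects the cache level and locality hint for the SVE prefetch intrinsics. A minimal usage sketch, assuming an SVE-enabled toolchain such as -march=armv8-a+sve (largest_pow2_bytes is an illustrative name):

#include <arm_sve.h>

uint64_t largest_pow2_bytes(void) {
    /* count of 8-bit elements in the largest power-of-two subset of the vector length */
    return svcntb_pat(SV_POW2);
}
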
+/* Function attributes */
+#define __aio static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
+
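
__aio marks every following helper as always-inline and, via clang's __overloadable__ extension, lets plain C resolve calls by argument type the way C++ does; that is what makes the short-form svreinterpret overloads later in this header legal C. A hypothetical sketch of the mechanism, unrelated to this header:

static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
int twice(int x) { return 2 * x; }
static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
float twice(float x) { return 2.0f * x; }   /* a second 'twice' is valid C with this attribute */
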
+#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__)
+#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__)
+#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__)
+#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__)
+#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__)
+#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__)
+#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__)
+#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__)
+#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__)
+#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__)
+#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__)
+#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__)
+#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__)
+#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__)
+#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__)
+#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__)
+#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__)
+#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__)
+#define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__)
+#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__)
+#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__)
+#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__)
+#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__)
+#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__)
+#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__)
+#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__)
+#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__)
+#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__)
+#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__)
+#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__)
+#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__)
+#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__)
+#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__)
+#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__)
+#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__)
+#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__)
+#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__)
+#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__)
+#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__)
+#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__)
+#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__)
+#define svreinterpret_u8_s16(...) __builtin_sve_reinterpret_u8_s16(__VA_ARGS__)
+#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__)
+#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__)
+#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__)
+#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__)
+#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__)
+#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__)
+#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u8_f32(...) __builtin_sve_reinterpret_u8_f32(__VA_ARGS__)
+#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__)
+#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__)
+#define svreinterpret_u16_s16(...) __builtin_sve_reinterpret_u16_s16(__VA_ARGS__)
+#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__)
+#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__)
+#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__)
+#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__)
+#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__)
+#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__)
+#define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__)
+#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__)
+#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__)
+#define svreinterpret_u32_s16(...) __builtin_sve_reinterpret_u32_s16(__VA_ARGS__)
+#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__)
+#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__)
+#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__)
+#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__)
+#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__)
+#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__)
+#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__)
+#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__)
+#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__)
+#define svreinterpret_u64_s16(...) __builtin_sve_reinterpret_u64_s16(__VA_ARGS__)
+#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__)
+#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__)
+#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__)
+#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__)
+#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__)
+#define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__)
+#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__)
+#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__)
+#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__)
+#define svreinterpret_f16_s16(...) __builtin_sve_reinterpret_f16_s16(__VA_ARGS__)
+#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__)
+#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__)
+#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__)
+#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__)
+#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__)
+#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__)
+#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__)
+#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s16(...) __builtin_sve_reinterpret_bf16_s16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__)
+#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__)
+#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__)
+#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__)
+#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__)
+#define svreinterpret_f32_u16(...) __builtin_sve_reinterpret_f32_u16(__VA_ARGS__)
+#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__)
+#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__)
+#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__)
+#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__)
+#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__)
+#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__)
+#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__)
+#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__)
+#define svreinterpret_f64_u8(...) __builtin_sve_reinterpret_f64_u8(__VA_ARGS__)
+#define svreinterpret_f64_u16(...) __builtin_sve_reinterpret_f64_u16(__VA_ARGS__)
+#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__)
+#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__)
+#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__)
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__)
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__)
+#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__)
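
Each svreinterpret_* call is a pure bit-pattern cast between SVE vector types; no lanes are converted or moved, so both the typed macros above and the overloaded short forms below typically compile away entirely. A minimal usage sketch (float_bits is an illustrative name):

#include <arm_sve.h>

svuint32_t float_bits(svfloat32_t x) {
    return svreinterpret_u32(x);   /* resolves to __builtin_sve_reinterpret_u32_f32 */
}
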
+__aio svint8_t svreinterpret_s8(svint8_t op) {
+  return __builtin_sve_reinterpret_s8_s8(op);
+}
+
+__aio svint8_t svreinterpret_s8(svint16_t op) {
+  return __builtin_sve_reinterpret_s8_s16(op);
+}
+
+__aio svint8_t svreinterpret_s8(svint32_t op) {
+  return __builtin_sve_reinterpret_s8_s32(op);
+}
+
+__aio svint8_t svreinterpret_s8(svint64_t op) {
+  return __builtin_sve_reinterpret_s8_s64(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint8_t op) {
+  return __builtin_sve_reinterpret_s8_u8(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint16_t op) {
+  return __builtin_sve_reinterpret_s8_u16(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint32_t op) {
+  return __builtin_sve_reinterpret_s8_u32(op);
+}
+
+__aio svint8_t svreinterpret_s8(svuint64_t op) {
+  return __builtin_sve_reinterpret_s8_u64(op);
+}
+
+__aio svint8_t svreinterpret_s8(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s8_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint8_t svreinterpret_s8(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s8_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint8_t svreinterpret_s8(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s8_f32(op);
+}
+
+__aio svint8_t svreinterpret_s8(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s8_f64(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint8_t op) {
+  return __builtin_sve_reinterpret_s16_s8(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint16_t op) {
+  return __builtin_sve_reinterpret_s16_s16(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint32_t op) {
+  return __builtin_sve_reinterpret_s16_s32(op);
+}
+
+__aio svint16_t svreinterpret_s16(svint64_t op) {
+  return __builtin_sve_reinterpret_s16_s64(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint8_t op) {
+  return __builtin_sve_reinterpret_s16_u8(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint16_t op) {
+  return __builtin_sve_reinterpret_s16_u16(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint32_t op) {
+  return __builtin_sve_reinterpret_s16_u32(op);
+}
+
+__aio svint16_t svreinterpret_s16(svuint64_t op) {
+  return __builtin_sve_reinterpret_s16_u64(op);
+}
+
+__aio svint16_t svreinterpret_s16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s16_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint16_t svreinterpret_s16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint16_t svreinterpret_s16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s16_f32(op);
+}
+
+__aio svint16_t svreinterpret_s16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s16_f64(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint8_t op) {
+  return __builtin_sve_reinterpret_s32_s8(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint16_t op) {
+  return __builtin_sve_reinterpret_s32_s16(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint32_t op) {
+  return __builtin_sve_reinterpret_s32_s32(op);
+}
+
+__aio svint32_t svreinterpret_s32(svint64_t op) {
+  return __builtin_sve_reinterpret_s32_s64(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint8_t op) {
+  return __builtin_sve_reinterpret_s32_u8(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint16_t op) {
+  return __builtin_sve_reinterpret_s32_u16(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint32_t op) {
+  return __builtin_sve_reinterpret_s32_u32(op);
+}
+
+__aio svint32_t svreinterpret_s32(svuint64_t op) {
+  return __builtin_sve_reinterpret_s32_u64(op);
+}
+
+__aio svint32_t svreinterpret_s32(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s32_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint32_t svreinterpret_s32(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s32_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint32_t svreinterpret_s32(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s32_f32(op);
+}
+
+__aio svint32_t svreinterpret_s32(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s32_f64(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint8_t op) {
+  return __builtin_sve_reinterpret_s64_s8(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint16_t op) {
+  return __builtin_sve_reinterpret_s64_s16(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint32_t op) {
+  return __builtin_sve_reinterpret_s64_s32(op);
+}
+
+__aio svint64_t svreinterpret_s64(svint64_t op) {
+  return __builtin_sve_reinterpret_s64_s64(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint8_t op) {
+  return __builtin_sve_reinterpret_s64_u8(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint16_t op) {
+  return __builtin_sve_reinterpret_s64_u16(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint32_t op) {
+  return __builtin_sve_reinterpret_s64_u32(op);
+}
+
+__aio svint64_t svreinterpret_s64(svuint64_t op) {
+  return __builtin_sve_reinterpret_s64_u64(op);
+}
+
+__aio svint64_t svreinterpret_s64(svfloat16_t op) {
+  return __builtin_sve_reinterpret_s64_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svint64_t svreinterpret_s64(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_s64_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svint64_t svreinterpret_s64(svfloat32_t op) {
+  return __builtin_sve_reinterpret_s64_f32(op);
+}
+
+__aio svint64_t svreinterpret_s64(svfloat64_t op) {
+  return __builtin_sve_reinterpret_s64_f64(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint8_t op) {
+  return __builtin_sve_reinterpret_u8_s8(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint16_t op) {
+  return __builtin_sve_reinterpret_u8_s16(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint32_t op) {
+  return __builtin_sve_reinterpret_u8_s32(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svint64_t op) {
+  return __builtin_sve_reinterpret_u8_s64(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint8_t op) {
+  return __builtin_sve_reinterpret_u8_u8(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint16_t op) {
+  return __builtin_sve_reinterpret_u8_u16(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint32_t op) {
+  return __builtin_sve_reinterpret_u8_u32(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svuint64_t op) {
+  return __builtin_sve_reinterpret_u8_u64(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u8_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint8_t svreinterpret_u8(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u8_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint8_t svreinterpret_u8(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u8_f32(op);
+}
+
+__aio svuint8_t svreinterpret_u8(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u8_f64(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint8_t op) {
+  return __builtin_sve_reinterpret_u16_s8(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint16_t op) {
+  return __builtin_sve_reinterpret_u16_s16(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint32_t op) {
+  return __builtin_sve_reinterpret_u16_s32(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svint64_t op) {
+  return __builtin_sve_reinterpret_u16_s64(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint8_t op) {
+  return __builtin_sve_reinterpret_u16_u8(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint16_t op) {
+  return __builtin_sve_reinterpret_u16_u16(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint32_t op) {
+  return __builtin_sve_reinterpret_u16_u32(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svuint64_t op) {
+  return __builtin_sve_reinterpret_u16_u64(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u16_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint16_t svreinterpret_u16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint16_t svreinterpret_u16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u16_f32(op);
+}
+
+__aio svuint16_t svreinterpret_u16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u16_f64(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint8_t op) {
+  return __builtin_sve_reinterpret_u32_s8(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint16_t op) {
+  return __builtin_sve_reinterpret_u32_s16(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint32_t op) {
+  return __builtin_sve_reinterpret_u32_s32(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svint64_t op) {
+  return __builtin_sve_reinterpret_u32_s64(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint8_t op) {
+  return __builtin_sve_reinterpret_u32_u8(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint16_t op) {
+  return __builtin_sve_reinterpret_u32_u16(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint32_t op) {
+  return __builtin_sve_reinterpret_u32_u32(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svuint64_t op) {
+  return __builtin_sve_reinterpret_u32_u64(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u32_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint32_t svreinterpret_u32(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u32_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint32_t svreinterpret_u32(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u32_f32(op);
+}
+
+__aio svuint32_t svreinterpret_u32(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u32_f64(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint8_t op) {
+  return __builtin_sve_reinterpret_u64_s8(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint16_t op) {
+  return __builtin_sve_reinterpret_u64_s16(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint32_t op) {
+  return __builtin_sve_reinterpret_u64_s32(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svint64_t op) {
+  return __builtin_sve_reinterpret_u64_s64(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint8_t op) {
+  return __builtin_sve_reinterpret_u64_u8(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint16_t op) {
+  return __builtin_sve_reinterpret_u64_u16(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint32_t op) {
+  return __builtin_sve_reinterpret_u64_u32(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svuint64_t op) {
+  return __builtin_sve_reinterpret_u64_u64(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svfloat16_t op) {
+  return __builtin_sve_reinterpret_u64_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svuint64_t svreinterpret_u64(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_u64_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svuint64_t svreinterpret_u64(svfloat32_t op) {
+  return __builtin_sve_reinterpret_u64_f32(op);
+}
+
+__aio svuint64_t svreinterpret_u64(svfloat64_t op) {
+  return __builtin_sve_reinterpret_u64_f64(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint8_t op) {
+  return __builtin_sve_reinterpret_f16_s8(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint16_t op) {
+  return __builtin_sve_reinterpret_f16_s16(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint32_t op) {
+  return __builtin_sve_reinterpret_f16_s32(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svint64_t op) {
+  return __builtin_sve_reinterpret_f16_s64(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint8_t op) {
+  return __builtin_sve_reinterpret_f16_u8(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint16_t op) {
+  return __builtin_sve_reinterpret_f16_u16(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint32_t op) {
+  return __builtin_sve_reinterpret_f16_u32(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svuint64_t op) {
+  return __builtin_sve_reinterpret_f16_u64(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_f16_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svfloat16_t svreinterpret_f16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_f16_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat16_t svreinterpret_f16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_f16_f32(op);
+}
+
+__aio svfloat16_t svreinterpret_f16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_f16_f64(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svbfloat16_t svreinterpret_bf16(svint8_t op) {
+  return __builtin_sve_reinterpret_bf16_s8(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svint16_t op) {
+  return __builtin_sve_reinterpret_bf16_s16(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svint32_t op) {
+  return __builtin_sve_reinterpret_bf16_s32(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svint64_t op) {
+  return __builtin_sve_reinterpret_bf16_s64(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svuint8_t op) {
+  return __builtin_sve_reinterpret_bf16_u8(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svuint16_t op) {
+  return __builtin_sve_reinterpret_bf16_u16(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svuint32_t op) {
+  return __builtin_sve_reinterpret_bf16_u32(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svuint64_t op) {
+  return __builtin_sve_reinterpret_bf16_u64(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svfloat16_t op) {
+  return __builtin_sve_reinterpret_bf16_f16(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_bf16_bf16(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svfloat32_t op) {
+  return __builtin_sve_reinterpret_bf16_f32(op);
+}
+
+__aio svbfloat16_t svreinterpret_bf16(svfloat64_t op) {
+  return __builtin_sve_reinterpret_bf16_f64(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat32_t svreinterpret_f32(svint8_t op) {
+  return __builtin_sve_reinterpret_f32_s8(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svint16_t op) {
+  return __builtin_sve_reinterpret_f32_s16(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svint32_t op) {
+  return __builtin_sve_reinterpret_f32_s32(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svint64_t op) {
+  return __builtin_sve_reinterpret_f32_s64(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint8_t op) {
+  return __builtin_sve_reinterpret_f32_u8(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint16_t op) {
+  return __builtin_sve_reinterpret_f32_u16(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint32_t op) {
+  return __builtin_sve_reinterpret_f32_u32(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svuint64_t op) {
+  return __builtin_sve_reinterpret_f32_u64(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svfloat16_t op) {
+  return __builtin_sve_reinterpret_f32_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svfloat32_t svreinterpret_f32(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_f32_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat32_t svreinterpret_f32(svfloat32_t op) {
+  return __builtin_sve_reinterpret_f32_f32(op);
+}
+
+__aio svfloat32_t svreinterpret_f32(svfloat64_t op) {
+  return __builtin_sve_reinterpret_f32_f64(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint8_t op) {
+  return __builtin_sve_reinterpret_f64_s8(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint16_t op) {
+  return __builtin_sve_reinterpret_f64_s16(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint32_t op) {
+  return __builtin_sve_reinterpret_f64_s32(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svint64_t op) {
+  return __builtin_sve_reinterpret_f64_s64(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint8_t op) {
+  return __builtin_sve_reinterpret_f64_u8(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint16_t op) {
+  return __builtin_sve_reinterpret_f64_u16(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint32_t op) {
+  return __builtin_sve_reinterpret_f64_u32(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svuint64_t op) {
+  return __builtin_sve_reinterpret_f64_u64(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svfloat16_t op) {
+  return __builtin_sve_reinterpret_f64_f16(op);
+}
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio svfloat64_t svreinterpret_f64(svbfloat16_t op) {
+  return __builtin_sve_reinterpret_f64_bf16(op);
+}
+
+#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */
+__aio svfloat64_t svreinterpret_f64(svfloat32_t op) {
+  return __builtin_sve_reinterpret_f64_f32(op);
+}
+
+__aio svfloat64_t svreinterpret_f64(svfloat64_t op) {
+  return __builtin_sve_reinterpret_f64_f64(op);
+}
+
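+/* The svreinterpret_* overloads above are thin wrappers over clang
+ * builtins that retype a vector without changing its bits; no value
+ * conversion is performed. __aio (defined earlier in this header) is
+ * expected to mark them always-inline and overloadable, so the right
+ * builtin is picked from the argument type at compile time. The #define
+ * lines below map each explicitly-typed ACLE intrinsic name onto its
+ * __builtin_sve_* implementation. Illustrative sketch (assumes the
+ * standard ACLE names, not all of which appear in this excerpt):
+ *
+ *   svfloat32_t v    = svdup_n_f32(1.0f);     // broadcast 1.0f
+ *   svuint32_t  bits = svreinterpret_u32(v);  // same bits, new type
+ */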
+#define svabd_n_f64_m(...) __builtin_sve_svabd_n_f64_m(__VA_ARGS__)
+#define svabd_n_f32_m(...) __builtin_sve_svabd_n_f32_m(__VA_ARGS__)
+#define svabd_n_f16_m(...) __builtin_sve_svabd_n_f16_m(__VA_ARGS__)
+#define svabd_n_f64_x(...) __builtin_sve_svabd_n_f64_x(__VA_ARGS__)
+#define svabd_n_f32_x(...) __builtin_sve_svabd_n_f32_x(__VA_ARGS__)
+#define svabd_n_f16_x(...) __builtin_sve_svabd_n_f16_x(__VA_ARGS__)
+#define svabd_n_f64_z(...) __builtin_sve_svabd_n_f64_z(__VA_ARGS__)
+#define svabd_n_f32_z(...) __builtin_sve_svabd_n_f32_z(__VA_ARGS__)
+#define svabd_n_f16_z(...) __builtin_sve_svabd_n_f16_z(__VA_ARGS__)
+#define svabd_n_s8_m(...) __builtin_sve_svabd_n_s8_m(__VA_ARGS__)
+#define svabd_n_s32_m(...) __builtin_sve_svabd_n_s32_m(__VA_ARGS__)
+#define svabd_n_s64_m(...) __builtin_sve_svabd_n_s64_m(__VA_ARGS__)
+#define svabd_n_s16_m(...) __builtin_sve_svabd_n_s16_m(__VA_ARGS__)
+#define svabd_n_s8_x(...) __builtin_sve_svabd_n_s8_x(__VA_ARGS__)
+#define svabd_n_s32_x(...) __builtin_sve_svabd_n_s32_x(__VA_ARGS__)
+#define svabd_n_s64_x(...) __builtin_sve_svabd_n_s64_x(__VA_ARGS__)
+#define svabd_n_s16_x(...) __builtin_sve_svabd_n_s16_x(__VA_ARGS__)
+#define svabd_n_s8_z(...) __builtin_sve_svabd_n_s8_z(__VA_ARGS__)
+#define svabd_n_s32_z(...) __builtin_sve_svabd_n_s32_z(__VA_ARGS__)
+#define svabd_n_s64_z(...) __builtin_sve_svabd_n_s64_z(__VA_ARGS__)
+#define svabd_n_s16_z(...) __builtin_sve_svabd_n_s16_z(__VA_ARGS__)
+#define svabd_n_u8_m(...) __builtin_sve_svabd_n_u8_m(__VA_ARGS__)
+#define svabd_n_u32_m(...) __builtin_sve_svabd_n_u32_m(__VA_ARGS__)
+#define svabd_n_u64_m(...) __builtin_sve_svabd_n_u64_m(__VA_ARGS__)
+#define svabd_n_u16_m(...) __builtin_sve_svabd_n_u16_m(__VA_ARGS__)
+#define svabd_n_u8_x(...) __builtin_sve_svabd_n_u8_x(__VA_ARGS__)
+#define svabd_n_u32_x(...) __builtin_sve_svabd_n_u32_x(__VA_ARGS__)
+#define svabd_n_u64_x(...) __builtin_sve_svabd_n_u64_x(__VA_ARGS__)
+#define svabd_n_u16_x(...) __builtin_sve_svabd_n_u16_x(__VA_ARGS__)
+#define svabd_n_u8_z(...) __builtin_sve_svabd_n_u8_z(__VA_ARGS__)
+#define svabd_n_u32_z(...) __builtin_sve_svabd_n_u32_z(__VA_ARGS__)
+#define svabd_n_u64_z(...) __builtin_sve_svabd_n_u64_z(__VA_ARGS__)
+#define svabd_n_u16_z(...) __builtin_sve_svabd_n_u16_z(__VA_ARGS__)
+#define svabd_f64_m(...) __builtin_sve_svabd_f64_m(__VA_ARGS__)
+#define svabd_f32_m(...) __builtin_sve_svabd_f32_m(__VA_ARGS__)
+#define svabd_f16_m(...) __builtin_sve_svabd_f16_m(__VA_ARGS__)
+#define svabd_f64_x(...) __builtin_sve_svabd_f64_x(__VA_ARGS__)
+#define svabd_f32_x(...) __builtin_sve_svabd_f32_x(__VA_ARGS__)
+#define svabd_f16_x(...) __builtin_sve_svabd_f16_x(__VA_ARGS__)
+#define svabd_f64_z(...) __builtin_sve_svabd_f64_z(__VA_ARGS__)
+#define svabd_f32_z(...) __builtin_sve_svabd_f32_z(__VA_ARGS__)
+#define svabd_f16_z(...) __builtin_sve_svabd_f16_z(__VA_ARGS__)
+#define svabd_s8_m(...) __builtin_sve_svabd_s8_m(__VA_ARGS__)
+#define svabd_s32_m(...) __builtin_sve_svabd_s32_m(__VA_ARGS__)
+#define svabd_s64_m(...) __builtin_sve_svabd_s64_m(__VA_ARGS__)
+#define svabd_s16_m(...) __builtin_sve_svabd_s16_m(__VA_ARGS__)
+#define svabd_s8_x(...) __builtin_sve_svabd_s8_x(__VA_ARGS__)
+#define svabd_s32_x(...) __builtin_sve_svabd_s32_x(__VA_ARGS__)
+#define svabd_s64_x(...) __builtin_sve_svabd_s64_x(__VA_ARGS__)
+#define svabd_s16_x(...) __builtin_sve_svabd_s16_x(__VA_ARGS__)
+#define svabd_s8_z(...) __builtin_sve_svabd_s8_z(__VA_ARGS__)
+#define svabd_s32_z(...) __builtin_sve_svabd_s32_z(__VA_ARGS__)
+#define svabd_s64_z(...) __builtin_sve_svabd_s64_z(__VA_ARGS__)
+#define svabd_s16_z(...) __builtin_sve_svabd_s16_z(__VA_ARGS__)
+#define svabd_u8_m(...) __builtin_sve_svabd_u8_m(__VA_ARGS__)
+#define svabd_u32_m(...) __builtin_sve_svabd_u32_m(__VA_ARGS__)
+#define svabd_u64_m(...) __builtin_sve_svabd_u64_m(__VA_ARGS__)
+#define svabd_u16_m(...) __builtin_sve_svabd_u16_m(__VA_ARGS__)
+#define svabd_u8_x(...) __builtin_sve_svabd_u8_x(__VA_ARGS__)
+#define svabd_u32_x(...) __builtin_sve_svabd_u32_x(__VA_ARGS__)
+#define svabd_u64_x(...) __builtin_sve_svabd_u64_x(__VA_ARGS__)
+#define svabd_u16_x(...) __builtin_sve_svabd_u16_x(__VA_ARGS__)
+#define svabd_u8_z(...) __builtin_sve_svabd_u8_z(__VA_ARGS__)
+#define svabd_u32_z(...) __builtin_sve_svabd_u32_z(__VA_ARGS__)
+#define svabd_u64_z(...) __builtin_sve_svabd_u64_z(__VA_ARGS__)
+#define svabd_u16_z(...) __builtin_sve_svabd_u16_z(__VA_ARGS__)
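+/* Predication-suffix conventions used throughout the defines below: _m
+ * merges (inactive lanes keep the value of the first data operand), _z
+ * zeroes inactive lanes, and _x leaves them unspecified so the compiler
+ * can pick the cheapest encoding; an _n infix means the last data
+ * operand is a scalar broadcast across the vector. Sketch, assuming the
+ * usual ACLE predicate helpers:
+ *
+ *   svbool_t    pg = svptrue_b32();
+ *   svfloat32_t d  = svabd_n_f32_x(pg, a, 2.0f);  // |a - 2.0f| per lane
+ */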
+#define svabs_f64_m(...) __builtin_sve_svabs_f64_m(__VA_ARGS__)
+#define svabs_f32_m(...) __builtin_sve_svabs_f32_m(__VA_ARGS__)
+#define svabs_f16_m(...) __builtin_sve_svabs_f16_m(__VA_ARGS__)
+#define svabs_f64_x(...) __builtin_sve_svabs_f64_x(__VA_ARGS__)
+#define svabs_f32_x(...) __builtin_sve_svabs_f32_x(__VA_ARGS__)
+#define svabs_f16_x(...) __builtin_sve_svabs_f16_x(__VA_ARGS__)
+#define svabs_f64_z(...) __builtin_sve_svabs_f64_z(__VA_ARGS__)
+#define svabs_f32_z(...) __builtin_sve_svabs_f32_z(__VA_ARGS__)
+#define svabs_f16_z(...) __builtin_sve_svabs_f16_z(__VA_ARGS__)
+#define svabs_s8_m(...) __builtin_sve_svabs_s8_m(__VA_ARGS__)
+#define svabs_s32_m(...) __builtin_sve_svabs_s32_m(__VA_ARGS__)
+#define svabs_s64_m(...) __builtin_sve_svabs_s64_m(__VA_ARGS__)
+#define svabs_s16_m(...) __builtin_sve_svabs_s16_m(__VA_ARGS__)
+#define svabs_s8_x(...) __builtin_sve_svabs_s8_x(__VA_ARGS__)
+#define svabs_s32_x(...) __builtin_sve_svabs_s32_x(__VA_ARGS__)
+#define svabs_s64_x(...) __builtin_sve_svabs_s64_x(__VA_ARGS__)
+#define svabs_s16_x(...) __builtin_sve_svabs_s16_x(__VA_ARGS__)
+#define svabs_s8_z(...) __builtin_sve_svabs_s8_z(__VA_ARGS__)
+#define svabs_s32_z(...) __builtin_sve_svabs_s32_z(__VA_ARGS__)
+#define svabs_s64_z(...) __builtin_sve_svabs_s64_z(__VA_ARGS__)
+#define svabs_s16_z(...) __builtin_sve_svabs_s16_z(__VA_ARGS__)
+#define svacge_n_f64(...) __builtin_sve_svacge_n_f64(__VA_ARGS__)
+#define svacge_n_f32(...) __builtin_sve_svacge_n_f32(__VA_ARGS__)
+#define svacge_n_f16(...) __builtin_sve_svacge_n_f16(__VA_ARGS__)
+#define svacge_f64(...) __builtin_sve_svacge_f64(__VA_ARGS__)
+#define svacge_f32(...) __builtin_sve_svacge_f32(__VA_ARGS__)
+#define svacge_f16(...) __builtin_sve_svacge_f16(__VA_ARGS__)
+#define svacgt_n_f64(...) __builtin_sve_svacgt_n_f64(__VA_ARGS__)
+#define svacgt_n_f32(...) __builtin_sve_svacgt_n_f32(__VA_ARGS__)
+#define svacgt_n_f16(...) __builtin_sve_svacgt_n_f16(__VA_ARGS__)
+#define svacgt_f64(...) __builtin_sve_svacgt_f64(__VA_ARGS__)
+#define svacgt_f32(...) __builtin_sve_svacgt_f32(__VA_ARGS__)
+#define svacgt_f16(...) __builtin_sve_svacgt_f16(__VA_ARGS__)
+#define svacle_n_f64(...) __builtin_sve_svacle_n_f64(__VA_ARGS__)
+#define svacle_n_f32(...) __builtin_sve_svacle_n_f32(__VA_ARGS__)
+#define svacle_n_f16(...) __builtin_sve_svacle_n_f16(__VA_ARGS__)
+#define svacle_f64(...) __builtin_sve_svacle_f64(__VA_ARGS__)
+#define svacle_f32(...) __builtin_sve_svacle_f32(__VA_ARGS__)
+#define svacle_f16(...) __builtin_sve_svacle_f16(__VA_ARGS__)
+#define svaclt_n_f64(...) __builtin_sve_svaclt_n_f64(__VA_ARGS__)
+#define svaclt_n_f32(...) __builtin_sve_svaclt_n_f32(__VA_ARGS__)
+#define svaclt_n_f16(...) __builtin_sve_svaclt_n_f16(__VA_ARGS__)
+#define svaclt_f64(...) __builtin_sve_svaclt_f64(__VA_ARGS__)
+#define svaclt_f32(...) __builtin_sve_svaclt_f32(__VA_ARGS__)
+#define svaclt_f16(...) __builtin_sve_svaclt_f16(__VA_ARGS__)
+#define svadd_n_f64_m(...) __builtin_sve_svadd_n_f64_m(__VA_ARGS__)
+#define svadd_n_f32_m(...) __builtin_sve_svadd_n_f32_m(__VA_ARGS__)
+#define svadd_n_f16_m(...) __builtin_sve_svadd_n_f16_m(__VA_ARGS__)
+#define svadd_n_f64_x(...) __builtin_sve_svadd_n_f64_x(__VA_ARGS__)
+#define svadd_n_f32_x(...) __builtin_sve_svadd_n_f32_x(__VA_ARGS__)
+#define svadd_n_f16_x(...) __builtin_sve_svadd_n_f16_x(__VA_ARGS__)
+#define svadd_n_f64_z(...) __builtin_sve_svadd_n_f64_z(__VA_ARGS__)
+#define svadd_n_f32_z(...) __builtin_sve_svadd_n_f32_z(__VA_ARGS__)
+#define svadd_n_f16_z(...) __builtin_sve_svadd_n_f16_z(__VA_ARGS__)
+#define svadd_n_u8_m(...) __builtin_sve_svadd_n_u8_m(__VA_ARGS__)
+#define svadd_n_u32_m(...) __builtin_sve_svadd_n_u32_m(__VA_ARGS__)
+#define svadd_n_u64_m(...) __builtin_sve_svadd_n_u64_m(__VA_ARGS__)
+#define svadd_n_u16_m(...) __builtin_sve_svadd_n_u16_m(__VA_ARGS__)
+#define svadd_n_s8_m(...) __builtin_sve_svadd_n_s8_m(__VA_ARGS__)
+#define svadd_n_s32_m(...) __builtin_sve_svadd_n_s32_m(__VA_ARGS__)
+#define svadd_n_s64_m(...) __builtin_sve_svadd_n_s64_m(__VA_ARGS__)
+#define svadd_n_s16_m(...) __builtin_sve_svadd_n_s16_m(__VA_ARGS__)
+#define svadd_n_u8_x(...) __builtin_sve_svadd_n_u8_x(__VA_ARGS__)
+#define svadd_n_u32_x(...) __builtin_sve_svadd_n_u32_x(__VA_ARGS__)
+#define svadd_n_u64_x(...) __builtin_sve_svadd_n_u64_x(__VA_ARGS__)
+#define svadd_n_u16_x(...) __builtin_sve_svadd_n_u16_x(__VA_ARGS__)
+#define svadd_n_s8_x(...) __builtin_sve_svadd_n_s8_x(__VA_ARGS__)
+#define svadd_n_s32_x(...) __builtin_sve_svadd_n_s32_x(__VA_ARGS__)
+#define svadd_n_s64_x(...) __builtin_sve_svadd_n_s64_x(__VA_ARGS__)
+#define svadd_n_s16_x(...) __builtin_sve_svadd_n_s16_x(__VA_ARGS__)
+#define svadd_n_u8_z(...) __builtin_sve_svadd_n_u8_z(__VA_ARGS__)
+#define svadd_n_u32_z(...) __builtin_sve_svadd_n_u32_z(__VA_ARGS__)
+#define svadd_n_u64_z(...) __builtin_sve_svadd_n_u64_z(__VA_ARGS__)
+#define svadd_n_u16_z(...) __builtin_sve_svadd_n_u16_z(__VA_ARGS__)
+#define svadd_n_s8_z(...) __builtin_sve_svadd_n_s8_z(__VA_ARGS__)
+#define svadd_n_s32_z(...) __builtin_sve_svadd_n_s32_z(__VA_ARGS__)
+#define svadd_n_s64_z(...) __builtin_sve_svadd_n_s64_z(__VA_ARGS__)
+#define svadd_n_s16_z(...) __builtin_sve_svadd_n_s16_z(__VA_ARGS__)
+#define svadd_f64_m(...) __builtin_sve_svadd_f64_m(__VA_ARGS__)
+#define svadd_f32_m(...) __builtin_sve_svadd_f32_m(__VA_ARGS__)
+#define svadd_f16_m(...) __builtin_sve_svadd_f16_m(__VA_ARGS__)
+#define svadd_f64_x(...) __builtin_sve_svadd_f64_x(__VA_ARGS__)
+#define svadd_f32_x(...) __builtin_sve_svadd_f32_x(__VA_ARGS__)
+#define svadd_f16_x(...) __builtin_sve_svadd_f16_x(__VA_ARGS__)
+#define svadd_f64_z(...) __builtin_sve_svadd_f64_z(__VA_ARGS__)
+#define svadd_f32_z(...) __builtin_sve_svadd_f32_z(__VA_ARGS__)
+#define svadd_f16_z(...) __builtin_sve_svadd_f16_z(__VA_ARGS__)
+#define svadd_u8_m(...) __builtin_sve_svadd_u8_m(__VA_ARGS__)
+#define svadd_u32_m(...) __builtin_sve_svadd_u32_m(__VA_ARGS__)
+#define svadd_u64_m(...) __builtin_sve_svadd_u64_m(__VA_ARGS__)
+#define svadd_u16_m(...) __builtin_sve_svadd_u16_m(__VA_ARGS__)
+#define svadd_s8_m(...) __builtin_sve_svadd_s8_m(__VA_ARGS__)
+#define svadd_s32_m(...) __builtin_sve_svadd_s32_m(__VA_ARGS__)
+#define svadd_s64_m(...) __builtin_sve_svadd_s64_m(__VA_ARGS__)
+#define svadd_s16_m(...) __builtin_sve_svadd_s16_m(__VA_ARGS__)
+#define svadd_u8_x(...) __builtin_sve_svadd_u8_x(__VA_ARGS__)
+#define svadd_u32_x(...) __builtin_sve_svadd_u32_x(__VA_ARGS__)
+#define svadd_u64_x(...) __builtin_sve_svadd_u64_x(__VA_ARGS__)
+#define svadd_u16_x(...) __builtin_sve_svadd_u16_x(__VA_ARGS__)
+#define svadd_s8_x(...) __builtin_sve_svadd_s8_x(__VA_ARGS__)
+#define svadd_s32_x(...) __builtin_sve_svadd_s32_x(__VA_ARGS__)
+#define svadd_s64_x(...) __builtin_sve_svadd_s64_x(__VA_ARGS__)
+#define svadd_s16_x(...) __builtin_sve_svadd_s16_x(__VA_ARGS__)
+#define svadd_u8_z(...) __builtin_sve_svadd_u8_z(__VA_ARGS__)
+#define svadd_u32_z(...) __builtin_sve_svadd_u32_z(__VA_ARGS__)
+#define svadd_u64_z(...) __builtin_sve_svadd_u64_z(__VA_ARGS__)
+#define svadd_u16_z(...) __builtin_sve_svadd_u16_z(__VA_ARGS__)
+#define svadd_s8_z(...) __builtin_sve_svadd_s8_z(__VA_ARGS__)
+#define svadd_s32_z(...) __builtin_sve_svadd_s32_z(__VA_ARGS__)
+#define svadd_s64_z(...) __builtin_sve_svadd_s64_z(__VA_ARGS__)
+#define svadd_s16_z(...) __builtin_sve_svadd_s16_z(__VA_ARGS__)
+#define svadda_f64(...) __builtin_sve_svadda_f64(__VA_ARGS__)
+#define svadda_f32(...) __builtin_sve_svadda_f32(__VA_ARGS__)
+#define svadda_f16(...) __builtin_sve_svadda_f16(__VA_ARGS__)
+#define svaddv_s8(...) __builtin_sve_svaddv_s8(__VA_ARGS__)
+#define svaddv_s32(...) __builtin_sve_svaddv_s32(__VA_ARGS__)
+#define svaddv_s64(...) __builtin_sve_svaddv_s64(__VA_ARGS__)
+#define svaddv_s16(...) __builtin_sve_svaddv_s16(__VA_ARGS__)
+#define svaddv_u8(...) __builtin_sve_svaddv_u8(__VA_ARGS__)
+#define svaddv_u32(...) __builtin_sve_svaddv_u32(__VA_ARGS__)
+#define svaddv_u64(...) __builtin_sve_svaddv_u64(__VA_ARGS__)
+#define svaddv_u16(...) __builtin_sve_svaddv_u16(__VA_ARGS__)
+#define svaddv_f64(...) __builtin_sve_svaddv_f64(__VA_ARGS__)
+#define svaddv_f32(...) __builtin_sve_svaddv_f32(__VA_ARGS__)
+#define svaddv_f16(...) __builtin_sve_svaddv_f16(__VA_ARGS__)
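+/* svaddv_* are horizontal reductions: they sum the active lanes and
+ * return a scalar. svadda_* is the strictly-ordered floating-point
+ * variant, folding an initial scalar operand through the lanes in order
+ * (for when FP rounding order matters). Sketch:
+ *
+ *   float sum = svaddv_f32(svptrue_b32(), v);  // sum of all lanes
+ */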
+#define svadrb_u32base_u32offset(...) __builtin_sve_svadrb_u32base_u32offset(__VA_ARGS__)
+#define svadrb_u64base_u64offset(...) __builtin_sve_svadrb_u64base_u64offset(__VA_ARGS__)
+#define svadrb_u32base_s32offset(...) __builtin_sve_svadrb_u32base_s32offset(__VA_ARGS__)
+#define svadrb_u64base_s64offset(...) __builtin_sve_svadrb_u64base_s64offset(__VA_ARGS__)
+#define svadrd_u32base_u32index(...) __builtin_sve_svadrd_u32base_u32index(__VA_ARGS__)
+#define svadrd_u64base_u64index(...) __builtin_sve_svadrd_u64base_u64index(__VA_ARGS__)
+#define svadrd_u32base_s32index(...) __builtin_sve_svadrd_u32base_s32index(__VA_ARGS__)
+#define svadrd_u64base_s64index(...) __builtin_sve_svadrd_u64base_s64index(__VA_ARGS__)
+#define svadrh_u32base_u32index(...) __builtin_sve_svadrh_u32base_u32index(__VA_ARGS__)
+#define svadrh_u64base_u64index(...) __builtin_sve_svadrh_u64base_u64index(__VA_ARGS__)
+#define svadrh_u32base_s32index(...) __builtin_sve_svadrh_u32base_s32index(__VA_ARGS__)
+#define svadrh_u64base_s64index(...) __builtin_sve_svadrh_u64base_s64index(__VA_ARGS__)
+#define svadrw_u32base_u32index(...) __builtin_sve_svadrw_u32base_u32index(__VA_ARGS__)
+#define svadrw_u64base_u64index(...) __builtin_sve_svadrw_u64base_u64index(__VA_ARGS__)
+#define svadrw_u32base_s32index(...) __builtin_sve_svadrw_u32base_s32index(__VA_ARGS__)
+#define svadrw_u64base_s64index(...) __builtin_sve_svadrw_u64base_s64index(__VA_ARGS__)
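+/* svadrb/svadrh/svadrw/svadrd compute vectors of addresses for use with
+ * gathers and scatters: svadrb adds an unscaled byte offset, while the
+ * h/w/d forms add an index scaled by 2/4/8 bytes, matching the offset/
+ * index naming in the defines above. Sketch:
+ *
+ *   svuint32_t addrs = svadrw_u32base_u32index(bases, idx); // base+4*idx
+ */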
+#define svand_b_z(...) __builtin_sve_svand_b_z(__VA_ARGS__)
+#define svand_n_u8_m(...) __builtin_sve_svand_n_u8_m(__VA_ARGS__)
+#define svand_n_u32_m(...) __builtin_sve_svand_n_u32_m(__VA_ARGS__)
+#define svand_n_u64_m(...) __builtin_sve_svand_n_u64_m(__VA_ARGS__)
+#define svand_n_u16_m(...) __builtin_sve_svand_n_u16_m(__VA_ARGS__)
+#define svand_n_s8_m(...) __builtin_sve_svand_n_s8_m(__VA_ARGS__)
+#define svand_n_s32_m(...) __builtin_sve_svand_n_s32_m(__VA_ARGS__)
+#define svand_n_s64_m(...) __builtin_sve_svand_n_s64_m(__VA_ARGS__)
+#define svand_n_s16_m(...) __builtin_sve_svand_n_s16_m(__VA_ARGS__)
+#define svand_n_u8_x(...) __builtin_sve_svand_n_u8_x(__VA_ARGS__)
+#define svand_n_u32_x(...) __builtin_sve_svand_n_u32_x(__VA_ARGS__)
+#define svand_n_u64_x(...) __builtin_sve_svand_n_u64_x(__VA_ARGS__)
+#define svand_n_u16_x(...) __builtin_sve_svand_n_u16_x(__VA_ARGS__)
+#define svand_n_s8_x(...) __builtin_sve_svand_n_s8_x(__VA_ARGS__)
+#define svand_n_s32_x(...) __builtin_sve_svand_n_s32_x(__VA_ARGS__)
+#define svand_n_s64_x(...) __builtin_sve_svand_n_s64_x(__VA_ARGS__)
+#define svand_n_s16_x(...) __builtin_sve_svand_n_s16_x(__VA_ARGS__)
+#define svand_n_u8_z(...) __builtin_sve_svand_n_u8_z(__VA_ARGS__)
+#define svand_n_u32_z(...) __builtin_sve_svand_n_u32_z(__VA_ARGS__)
+#define svand_n_u64_z(...) __builtin_sve_svand_n_u64_z(__VA_ARGS__)
+#define svand_n_u16_z(...) __builtin_sve_svand_n_u16_z(__VA_ARGS__)
+#define svand_n_s8_z(...) __builtin_sve_svand_n_s8_z(__VA_ARGS__)
+#define svand_n_s32_z(...) __builtin_sve_svand_n_s32_z(__VA_ARGS__)
+#define svand_n_s64_z(...) __builtin_sve_svand_n_s64_z(__VA_ARGS__)
+#define svand_n_s16_z(...) __builtin_sve_svand_n_s16_z(__VA_ARGS__)
+#define svand_u8_m(...) __builtin_sve_svand_u8_m(__VA_ARGS__)
+#define svand_u32_m(...) __builtin_sve_svand_u32_m(__VA_ARGS__)
+#define svand_u64_m(...) __builtin_sve_svand_u64_m(__VA_ARGS__)
+#define svand_u16_m(...) __builtin_sve_svand_u16_m(__VA_ARGS__)
+#define svand_s8_m(...) __builtin_sve_svand_s8_m(__VA_ARGS__)
+#define svand_s32_m(...) __builtin_sve_svand_s32_m(__VA_ARGS__)
+#define svand_s64_m(...) __builtin_sve_svand_s64_m(__VA_ARGS__)
+#define svand_s16_m(...) __builtin_sve_svand_s16_m(__VA_ARGS__)
+#define svand_u8_x(...) __builtin_sve_svand_u8_x(__VA_ARGS__)
+#define svand_u32_x(...) __builtin_sve_svand_u32_x(__VA_ARGS__)
+#define svand_u64_x(...) __builtin_sve_svand_u64_x(__VA_ARGS__)
+#define svand_u16_x(...) __builtin_sve_svand_u16_x(__VA_ARGS__)
+#define svand_s8_x(...) __builtin_sve_svand_s8_x(__VA_ARGS__)
+#define svand_s32_x(...) __builtin_sve_svand_s32_x(__VA_ARGS__)
+#define svand_s64_x(...) __builtin_sve_svand_s64_x(__VA_ARGS__)
+#define svand_s16_x(...) __builtin_sve_svand_s16_x(__VA_ARGS__)
+#define svand_u8_z(...) __builtin_sve_svand_u8_z(__VA_ARGS__)
+#define svand_u32_z(...) __builtin_sve_svand_u32_z(__VA_ARGS__)
+#define svand_u64_z(...) __builtin_sve_svand_u64_z(__VA_ARGS__)
+#define svand_u16_z(...) __builtin_sve_svand_u16_z(__VA_ARGS__)
+#define svand_s8_z(...) __builtin_sve_svand_s8_z(__VA_ARGS__)
+#define svand_s32_z(...) __builtin_sve_svand_s32_z(__VA_ARGS__)
+#define svand_s64_z(...) __builtin_sve_svand_s64_z(__VA_ARGS__)
+#define svand_s16_z(...) __builtin_sve_svand_s16_z(__VA_ARGS__)
+#define svandv_u8(...) __builtin_sve_svandv_u8(__VA_ARGS__)
+#define svandv_u32(...) __builtin_sve_svandv_u32(__VA_ARGS__)
+#define svandv_u64(...) __builtin_sve_svandv_u64(__VA_ARGS__)
+#define svandv_u16(...) __builtin_sve_svandv_u16(__VA_ARGS__)
+#define svandv_s8(...) __builtin_sve_svandv_s8(__VA_ARGS__)
+#define svandv_s32(...) __builtin_sve_svandv_s32(__VA_ARGS__)
+#define svandv_s64(...) __builtin_sve_svandv_s64(__VA_ARGS__)
+#define svandv_s16(...) __builtin_sve_svandv_s16(__VA_ARGS__)
+#define svasr_n_s8_m(...) __builtin_sve_svasr_n_s8_m(__VA_ARGS__)
+#define svasr_n_s32_m(...) __builtin_sve_svasr_n_s32_m(__VA_ARGS__)
+#define svasr_n_s64_m(...) __builtin_sve_svasr_n_s64_m(__VA_ARGS__)
+#define svasr_n_s16_m(...) __builtin_sve_svasr_n_s16_m(__VA_ARGS__)
+#define svasr_n_s8_x(...) __builtin_sve_svasr_n_s8_x(__VA_ARGS__)
+#define svasr_n_s32_x(...) __builtin_sve_svasr_n_s32_x(__VA_ARGS__)
+#define svasr_n_s64_x(...) __builtin_sve_svasr_n_s64_x(__VA_ARGS__)
+#define svasr_n_s16_x(...) __builtin_sve_svasr_n_s16_x(__VA_ARGS__)
+#define svasr_n_s8_z(...) __builtin_sve_svasr_n_s8_z(__VA_ARGS__)
+#define svasr_n_s32_z(...) __builtin_sve_svasr_n_s32_z(__VA_ARGS__)
+#define svasr_n_s64_z(...) __builtin_sve_svasr_n_s64_z(__VA_ARGS__)
+#define svasr_n_s16_z(...) __builtin_sve_svasr_n_s16_z(__VA_ARGS__)
+#define svasr_s8_m(...) __builtin_sve_svasr_s8_m(__VA_ARGS__)
+#define svasr_s32_m(...) __builtin_sve_svasr_s32_m(__VA_ARGS__)
+#define svasr_s64_m(...) __builtin_sve_svasr_s64_m(__VA_ARGS__)
+#define svasr_s16_m(...) __builtin_sve_svasr_s16_m(__VA_ARGS__)
+#define svasr_s8_x(...) __builtin_sve_svasr_s8_x(__VA_ARGS__)
+#define svasr_s32_x(...) __builtin_sve_svasr_s32_x(__VA_ARGS__)
+#define svasr_s64_x(...) __builtin_sve_svasr_s64_x(__VA_ARGS__)
+#define svasr_s16_x(...) __builtin_sve_svasr_s16_x(__VA_ARGS__)
+#define svasr_s8_z(...) __builtin_sve_svasr_s8_z(__VA_ARGS__)
+#define svasr_s32_z(...) __builtin_sve_svasr_s32_z(__VA_ARGS__)
+#define svasr_s64_z(...) __builtin_sve_svasr_s64_z(__VA_ARGS__)
+#define svasr_s16_z(...) __builtin_sve_svasr_s16_z(__VA_ARGS__)
+#define svasr_wide_n_s8_m(...) __builtin_sve_svasr_wide_n_s8_m(__VA_ARGS__)
+#define svasr_wide_n_s32_m(...) __builtin_sve_svasr_wide_n_s32_m(__VA_ARGS__)
+#define svasr_wide_n_s16_m(...) __builtin_sve_svasr_wide_n_s16_m(__VA_ARGS__)
+#define svasr_wide_n_s8_x(...) __builtin_sve_svasr_wide_n_s8_x(__VA_ARGS__)
+#define svasr_wide_n_s32_x(...) __builtin_sve_svasr_wide_n_s32_x(__VA_ARGS__)
+#define svasr_wide_n_s16_x(...) __builtin_sve_svasr_wide_n_s16_x(__VA_ARGS__)
+#define svasr_wide_n_s8_z(...) __builtin_sve_svasr_wide_n_s8_z(__VA_ARGS__)
+#define svasr_wide_n_s32_z(...) __builtin_sve_svasr_wide_n_s32_z(__VA_ARGS__)
+#define svasr_wide_n_s16_z(...) __builtin_sve_svasr_wide_n_s16_z(__VA_ARGS__)
+#define svasr_wide_s8_m(...) __builtin_sve_svasr_wide_s8_m(__VA_ARGS__)
+#define svasr_wide_s32_m(...) __builtin_sve_svasr_wide_s32_m(__VA_ARGS__)
+#define svasr_wide_s16_m(...) __builtin_sve_svasr_wide_s16_m(__VA_ARGS__)
+#define svasr_wide_s8_x(...) __builtin_sve_svasr_wide_s8_x(__VA_ARGS__)
+#define svasr_wide_s32_x(...) __builtin_sve_svasr_wide_s32_x(__VA_ARGS__)
+#define svasr_wide_s16_x(...) __builtin_sve_svasr_wide_s16_x(__VA_ARGS__)
+#define svasr_wide_s8_z(...) __builtin_sve_svasr_wide_s8_z(__VA_ARGS__)
+#define svasr_wide_s32_z(...) __builtin_sve_svasr_wide_s32_z(__VA_ARGS__)
+#define svasr_wide_s16_z(...) __builtin_sve_svasr_wide_s16_z(__VA_ARGS__)
+#define svasrd_n_s8_m(...) __builtin_sve_svasrd_n_s8_m(__VA_ARGS__)
+#define svasrd_n_s32_m(...) __builtin_sve_svasrd_n_s32_m(__VA_ARGS__)
+#define svasrd_n_s64_m(...) __builtin_sve_svasrd_n_s64_m(__VA_ARGS__)
+#define svasrd_n_s16_m(...) __builtin_sve_svasrd_n_s16_m(__VA_ARGS__)
+#define svasrd_n_s8_x(...) __builtin_sve_svasrd_n_s8_x(__VA_ARGS__)
+#define svasrd_n_s32_x(...) __builtin_sve_svasrd_n_s32_x(__VA_ARGS__)
+#define svasrd_n_s64_x(...) __builtin_sve_svasrd_n_s64_x(__VA_ARGS__)
+#define svasrd_n_s16_x(...) __builtin_sve_svasrd_n_s16_x(__VA_ARGS__)
+#define svasrd_n_s8_z(...) __builtin_sve_svasrd_n_s8_z(__VA_ARGS__)
+#define svasrd_n_s32_z(...) __builtin_sve_svasrd_n_s32_z(__VA_ARGS__)
+#define svasrd_n_s64_z(...) __builtin_sve_svasrd_n_s64_z(__VA_ARGS__)
+#define svasrd_n_s16_z(...) __builtin_sve_svasrd_n_s16_z(__VA_ARGS__)
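+/* svasr_* is a per-lane arithmetic shift right; the _wide forms take
+ * their shift amounts from a vector of 64-bit elements. svasrd_n_* is
+ * the "shift right for divide" form: a signed division by a power of
+ * two that rounds toward zero. Sketch:
+ *
+ *   svint32_t q = svasrd_n_s32_x(pg, x, 4);  // x / 16, rounding to zero
+ */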
+#define svbic_b_z(...) __builtin_sve_svbic_b_z(__VA_ARGS__)
+#define svbic_n_u8_m(...) __builtin_sve_svbic_n_u8_m(__VA_ARGS__)
+#define svbic_n_u32_m(...) __builtin_sve_svbic_n_u32_m(__VA_ARGS__)
+#define svbic_n_u64_m(...) __builtin_sve_svbic_n_u64_m(__VA_ARGS__)
+#define svbic_n_u16_m(...) __builtin_sve_svbic_n_u16_m(__VA_ARGS__)
+#define svbic_n_s8_m(...) __builtin_sve_svbic_n_s8_m(__VA_ARGS__)
+#define svbic_n_s32_m(...) __builtin_sve_svbic_n_s32_m(__VA_ARGS__)
+#define svbic_n_s64_m(...) __builtin_sve_svbic_n_s64_m(__VA_ARGS__)
+#define svbic_n_s16_m(...) __builtin_sve_svbic_n_s16_m(__VA_ARGS__)
+#define svbic_n_u8_x(...) __builtin_sve_svbic_n_u8_x(__VA_ARGS__)
+#define svbic_n_u32_x(...) __builtin_sve_svbic_n_u32_x(__VA_ARGS__)
+#define svbic_n_u64_x(...) __builtin_sve_svbic_n_u64_x(__VA_ARGS__)
+#define svbic_n_u16_x(...) __builtin_sve_svbic_n_u16_x(__VA_ARGS__)
+#define svbic_n_s8_x(...) __builtin_sve_svbic_n_s8_x(__VA_ARGS__)
+#define svbic_n_s32_x(...) __builtin_sve_svbic_n_s32_x(__VA_ARGS__)
+#define svbic_n_s64_x(...) __builtin_sve_svbic_n_s64_x(__VA_ARGS__)
+#define svbic_n_s16_x(...) __builtin_sve_svbic_n_s16_x(__VA_ARGS__)
+#define svbic_n_u8_z(...) __builtin_sve_svbic_n_u8_z(__VA_ARGS__)
+#define svbic_n_u32_z(...) __builtin_sve_svbic_n_u32_z(__VA_ARGS__)
+#define svbic_n_u64_z(...) __builtin_sve_svbic_n_u64_z(__VA_ARGS__)
+#define svbic_n_u16_z(...) __builtin_sve_svbic_n_u16_z(__VA_ARGS__)
+#define svbic_n_s8_z(...) __builtin_sve_svbic_n_s8_z(__VA_ARGS__)
+#define svbic_n_s32_z(...) __builtin_sve_svbic_n_s32_z(__VA_ARGS__)
+#define svbic_n_s64_z(...) __builtin_sve_svbic_n_s64_z(__VA_ARGS__)
+#define svbic_n_s16_z(...) __builtin_sve_svbic_n_s16_z(__VA_ARGS__)
+#define svbic_u8_m(...) __builtin_sve_svbic_u8_m(__VA_ARGS__)
+#define svbic_u32_m(...) __builtin_sve_svbic_u32_m(__VA_ARGS__)
+#define svbic_u64_m(...) __builtin_sve_svbic_u64_m(__VA_ARGS__)
+#define svbic_u16_m(...) __builtin_sve_svbic_u16_m(__VA_ARGS__)
+#define svbic_s8_m(...) __builtin_sve_svbic_s8_m(__VA_ARGS__)
+#define svbic_s32_m(...) __builtin_sve_svbic_s32_m(__VA_ARGS__)
+#define svbic_s64_m(...) __builtin_sve_svbic_s64_m(__VA_ARGS__)
+#define svbic_s16_m(...) __builtin_sve_svbic_s16_m(__VA_ARGS__)
+#define svbic_u8_x(...) __builtin_sve_svbic_u8_x(__VA_ARGS__)
+#define svbic_u32_x(...) __builtin_sve_svbic_u32_x(__VA_ARGS__)
+#define svbic_u64_x(...) __builtin_sve_svbic_u64_x(__VA_ARGS__)
+#define svbic_u16_x(...) __builtin_sve_svbic_u16_x(__VA_ARGS__)
+#define svbic_s8_x(...) __builtin_sve_svbic_s8_x(__VA_ARGS__)
+#define svbic_s32_x(...) __builtin_sve_svbic_s32_x(__VA_ARGS__)
+#define svbic_s64_x(...) __builtin_sve_svbic_s64_x(__VA_ARGS__)
+#define svbic_s16_x(...) __builtin_sve_svbic_s16_x(__VA_ARGS__)
+#define svbic_u8_z(...) __builtin_sve_svbic_u8_z(__VA_ARGS__)
+#define svbic_u32_z(...) __builtin_sve_svbic_u32_z(__VA_ARGS__)
+#define svbic_u64_z(...) __builtin_sve_svbic_u64_z(__VA_ARGS__)
+#define svbic_u16_z(...) __builtin_sve_svbic_u16_z(__VA_ARGS__)
+#define svbic_s8_z(...) __builtin_sve_svbic_s8_z(__VA_ARGS__)
+#define svbic_s32_z(...) __builtin_sve_svbic_s32_z(__VA_ARGS__)
+#define svbic_s64_z(...) __builtin_sve_svbic_s64_z(__VA_ARGS__)
+#define svbic_s16_z(...) __builtin_sve_svbic_s16_z(__VA_ARGS__)
+#define svbrka_b_m(...) __builtin_sve_svbrka_b_m(__VA_ARGS__)
+#define svbrka_b_z(...) __builtin_sve_svbrka_b_z(__VA_ARGS__)
+#define svbrkb_b_m(...) __builtin_sve_svbrkb_b_m(__VA_ARGS__)
+#define svbrkb_b_z(...) __builtin_sve_svbrkb_b_z(__VA_ARGS__)
+#define svbrkn_b_z(...) __builtin_sve_svbrkn_b_z(__VA_ARGS__)
+#define svbrkpa_b_z(...) __builtin_sve_svbrkpa_b_z(__VA_ARGS__)
+#define svbrkpb_b_z(...) __builtin_sve_svbrkpb_b_z(__VA_ARGS__)
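+/* The svbrk* defines are predicate "break" operations: svbrka/svbrkb
+ * yield a predicate that is true up to the first active true lane
+ * (inclusive for brka, exclusive for brkb) and false afterwards, while
+ * svbrkn/svbrkpa/svbrkpb propagate such a break from one predicate to
+ * the next; they are the usual way to turn a data-dependent condition
+ * into an early-exit mask in vector loops.
+ */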
+#define svcadd_f64_m(...) __builtin_sve_svcadd_f64_m(__VA_ARGS__)
+#define svcadd_f32_m(...) __builtin_sve_svcadd_f32_m(__VA_ARGS__)
+#define svcadd_f16_m(...) __builtin_sve_svcadd_f16_m(__VA_ARGS__)
+#define svcadd_f64_x(...) __builtin_sve_svcadd_f64_x(__VA_ARGS__)
+#define svcadd_f32_x(...) __builtin_sve_svcadd_f32_x(__VA_ARGS__)
+#define svcadd_f16_x(...) __builtin_sve_svcadd_f16_x(__VA_ARGS__)
+#define svcadd_f64_z(...) __builtin_sve_svcadd_f64_z(__VA_ARGS__)
+#define svcadd_f32_z(...) __builtin_sve_svcadd_f32_z(__VA_ARGS__)
+#define svcadd_f16_z(...) __builtin_sve_svcadd_f16_z(__VA_ARGS__)
+#define svclasta_n_u8(...) __builtin_sve_svclasta_n_u8(__VA_ARGS__)
+#define svclasta_n_u32(...) __builtin_sve_svclasta_n_u32(__VA_ARGS__)
+#define svclasta_n_u64(...) __builtin_sve_svclasta_n_u64(__VA_ARGS__)
+#define svclasta_n_u16(...) __builtin_sve_svclasta_n_u16(__VA_ARGS__)
+#define svclasta_n_s8(...) __builtin_sve_svclasta_n_s8(__VA_ARGS__)
+#define svclasta_n_f64(...) __builtin_sve_svclasta_n_f64(__VA_ARGS__)
+#define svclasta_n_f32(...) __builtin_sve_svclasta_n_f32(__VA_ARGS__)
+#define svclasta_n_f16(...) __builtin_sve_svclasta_n_f16(__VA_ARGS__)
+#define svclasta_n_s32(...) __builtin_sve_svclasta_n_s32(__VA_ARGS__)
+#define svclasta_n_s64(...) __builtin_sve_svclasta_n_s64(__VA_ARGS__)
+#define svclasta_n_s16(...) __builtin_sve_svclasta_n_s16(__VA_ARGS__)
+#define svclasta_u8(...) __builtin_sve_svclasta_u8(__VA_ARGS__)
+#define svclasta_u32(...) __builtin_sve_svclasta_u32(__VA_ARGS__)
+#define svclasta_u64(...) __builtin_sve_svclasta_u64(__VA_ARGS__)
+#define svclasta_u16(...) __builtin_sve_svclasta_u16(__VA_ARGS__)
+#define svclasta_s8(...) __builtin_sve_svclasta_s8(__VA_ARGS__)
+#define svclasta_f64(...) __builtin_sve_svclasta_f64(__VA_ARGS__)
+#define svclasta_f32(...) __builtin_sve_svclasta_f32(__VA_ARGS__)
+#define svclasta_f16(...) __builtin_sve_svclasta_f16(__VA_ARGS__)
+#define svclasta_s32(...) __builtin_sve_svclasta_s32(__VA_ARGS__)
+#define svclasta_s64(...) __builtin_sve_svclasta_s64(__VA_ARGS__)
+#define svclasta_s16(...) __builtin_sve_svclasta_s16(__VA_ARGS__)
+#define svclastb_n_u8(...) __builtin_sve_svclastb_n_u8(__VA_ARGS__)
+#define svclastb_n_u32(...) __builtin_sve_svclastb_n_u32(__VA_ARGS__)
+#define svclastb_n_u64(...) __builtin_sve_svclastb_n_u64(__VA_ARGS__)
+#define svclastb_n_u16(...) __builtin_sve_svclastb_n_u16(__VA_ARGS__)
+#define svclastb_n_s8(...) __builtin_sve_svclastb_n_s8(__VA_ARGS__)
+#define svclastb_n_f64(...) __builtin_sve_svclastb_n_f64(__VA_ARGS__)
+#define svclastb_n_f32(...) __builtin_sve_svclastb_n_f32(__VA_ARGS__)
+#define svclastb_n_f16(...) __builtin_sve_svclastb_n_f16(__VA_ARGS__)
+#define svclastb_n_s32(...) __builtin_sve_svclastb_n_s32(__VA_ARGS__)
+#define svclastb_n_s64(...) __builtin_sve_svclastb_n_s64(__VA_ARGS__)
+#define svclastb_n_s16(...) __builtin_sve_svclastb_n_s16(__VA_ARGS__)
+#define svclastb_u8(...) __builtin_sve_svclastb_u8(__VA_ARGS__)
+#define svclastb_u32(...) __builtin_sve_svclastb_u32(__VA_ARGS__)
+#define svclastb_u64(...) __builtin_sve_svclastb_u64(__VA_ARGS__)
+#define svclastb_u16(...) __builtin_sve_svclastb_u16(__VA_ARGS__)
+#define svclastb_s8(...) __builtin_sve_svclastb_s8(__VA_ARGS__)
+#define svclastb_f64(...) __builtin_sve_svclastb_f64(__VA_ARGS__)
+#define svclastb_f32(...) __builtin_sve_svclastb_f32(__VA_ARGS__)
+#define svclastb_f16(...) __builtin_sve_svclastb_f16(__VA_ARGS__)
+#define svclastb_s32(...) __builtin_sve_svclastb_s32(__VA_ARGS__)
+#define svclastb_s64(...) __builtin_sve_svclastb_s64(__VA_ARGS__)
+#define svclastb_s16(...) __builtin_sve_svclastb_s16(__VA_ARGS__)
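+/* svclasta/svclastb conditionally extract an element around the last
+ * active lane: clastb returns the last active element itself, clasta
+ * the element after it; when no lane is active, the scalar or vector
+ * fallback passed as the second argument is returned instead. Sketch:
+ *
+ *   float last = svclastb_n_f32(pg, 0.0f, v); // last active lane, else 0
+ */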
+#define svcls_s8_m(...) __builtin_sve_svcls_s8_m(__VA_ARGS__)
+#define svcls_s32_m(...) __builtin_sve_svcls_s32_m(__VA_ARGS__)
+#define svcls_s64_m(...) __builtin_sve_svcls_s64_m(__VA_ARGS__)
+#define svcls_s16_m(...) __builtin_sve_svcls_s16_m(__VA_ARGS__)
+#define svcls_s8_x(...) __builtin_sve_svcls_s8_x(__VA_ARGS__)
+#define svcls_s32_x(...) __builtin_sve_svcls_s32_x(__VA_ARGS__)
+#define svcls_s64_x(...) __builtin_sve_svcls_s64_x(__VA_ARGS__)
+#define svcls_s16_x(...) __builtin_sve_svcls_s16_x(__VA_ARGS__)
+#define svcls_s8_z(...) __builtin_sve_svcls_s8_z(__VA_ARGS__)
+#define svcls_s32_z(...) __builtin_sve_svcls_s32_z(__VA_ARGS__)
+#define svcls_s64_z(...) __builtin_sve_svcls_s64_z(__VA_ARGS__)
+#define svcls_s16_z(...) __builtin_sve_svcls_s16_z(__VA_ARGS__)
+#define svclz_u8_m(...) __builtin_sve_svclz_u8_m(__VA_ARGS__)
+#define svclz_u32_m(...) __builtin_sve_svclz_u32_m(__VA_ARGS__)
+#define svclz_u64_m(...) __builtin_sve_svclz_u64_m(__VA_ARGS__)
+#define svclz_u16_m(...) __builtin_sve_svclz_u16_m(__VA_ARGS__)
+#define svclz_s8_m(...) __builtin_sve_svclz_s8_m(__VA_ARGS__)
+#define svclz_s32_m(...) __builtin_sve_svclz_s32_m(__VA_ARGS__)
+#define svclz_s64_m(...) __builtin_sve_svclz_s64_m(__VA_ARGS__)
+#define svclz_s16_m(...) __builtin_sve_svclz_s16_m(__VA_ARGS__)
+#define svclz_u8_x(...) __builtin_sve_svclz_u8_x(__VA_ARGS__)
+#define svclz_u32_x(...) __builtin_sve_svclz_u32_x(__VA_ARGS__)
+#define svclz_u64_x(...) __builtin_sve_svclz_u64_x(__VA_ARGS__)
+#define svclz_u16_x(...) __builtin_sve_svclz_u16_x(__VA_ARGS__)
+#define svclz_s8_x(...) __builtin_sve_svclz_s8_x(__VA_ARGS__)
+#define svclz_s32_x(...) __builtin_sve_svclz_s32_x(__VA_ARGS__)
+#define svclz_s64_x(...) __builtin_sve_svclz_s64_x(__VA_ARGS__)
+#define svclz_s16_x(...) __builtin_sve_svclz_s16_x(__VA_ARGS__)
+#define svclz_u8_z(...) __builtin_sve_svclz_u8_z(__VA_ARGS__)
+#define svclz_u32_z(...) __builtin_sve_svclz_u32_z(__VA_ARGS__)
+#define svclz_u64_z(...) __builtin_sve_svclz_u64_z(__VA_ARGS__)
+#define svclz_u16_z(...) __builtin_sve_svclz_u16_z(__VA_ARGS__)
+#define svclz_s8_z(...) __builtin_sve_svclz_s8_z(__VA_ARGS__)
+#define svclz_s32_z(...) __builtin_sve_svclz_s32_z(__VA_ARGS__)
+#define svclz_s64_z(...) __builtin_sve_svclz_s64_z(__VA_ARGS__)
+#define svclz_s16_z(...) __builtin_sve_svclz_s16_z(__VA_ARGS__)
+#define svcmla_f64_m(...) __builtin_sve_svcmla_f64_m(__VA_ARGS__)
+#define svcmla_f32_m(...) __builtin_sve_svcmla_f32_m(__VA_ARGS__)
+#define svcmla_f16_m(...) __builtin_sve_svcmla_f16_m(__VA_ARGS__)
+#define svcmla_f64_x(...) __builtin_sve_svcmla_f64_x(__VA_ARGS__)
+#define svcmla_f32_x(...) __builtin_sve_svcmla_f32_x(__VA_ARGS__)
+#define svcmla_f16_x(...) __builtin_sve_svcmla_f16_x(__VA_ARGS__)
+#define svcmla_f64_z(...) __builtin_sve_svcmla_f64_z(__VA_ARGS__)
+#define svcmla_f32_z(...) __builtin_sve_svcmla_f32_z(__VA_ARGS__)
+#define svcmla_f16_z(...) __builtin_sve_svcmla_f16_z(__VA_ARGS__)
+#define svcmla_lane_f32(...) __builtin_sve_svcmla_lane_f32(__VA_ARGS__)
+#define svcmla_lane_f16(...) __builtin_sve_svcmla_lane_f16(__VA_ARGS__)
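+/* svcadd_* (above) and svcmla_* operate on complex numbers stored as
+ * interleaved real/imaginary lane pairs; both take a rotation argument
+ * in degrees (90 or 270 for cadd; 0, 90, 180 or 270 for cmla), and the
+ * _lane forms of svcmla select the complex multiplier by lane index.
+ */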
+#define svcmpeq_n_f64(...) __builtin_sve_svcmpeq_n_f64(__VA_ARGS__)
+#define svcmpeq_n_f32(...) __builtin_sve_svcmpeq_n_f32(__VA_ARGS__)
+#define svcmpeq_n_f16(...) __builtin_sve_svcmpeq_n_f16(__VA_ARGS__)
+#define svcmpeq_n_u8(...) __builtin_sve_svcmpeq_n_u8(__VA_ARGS__)
+#define svcmpeq_n_u32(...) __builtin_sve_svcmpeq_n_u32(__VA_ARGS__)
+#define svcmpeq_n_u64(...) __builtin_sve_svcmpeq_n_u64(__VA_ARGS__)
+#define svcmpeq_n_u16(...) __builtin_sve_svcmpeq_n_u16(__VA_ARGS__)
+#define svcmpeq_n_s8(...) __builtin_sve_svcmpeq_n_s8(__VA_ARGS__)
+#define svcmpeq_n_s32(...) __builtin_sve_svcmpeq_n_s32(__VA_ARGS__)
+#define svcmpeq_n_s64(...) __builtin_sve_svcmpeq_n_s64(__VA_ARGS__)
+#define svcmpeq_n_s16(...) __builtin_sve_svcmpeq_n_s16(__VA_ARGS__)
+#define svcmpeq_u8(...) __builtin_sve_svcmpeq_u8(__VA_ARGS__)
+#define svcmpeq_u32(...) __builtin_sve_svcmpeq_u32(__VA_ARGS__)
+#define svcmpeq_u64(...) __builtin_sve_svcmpeq_u64(__VA_ARGS__)
+#define svcmpeq_u16(...) __builtin_sve_svcmpeq_u16(__VA_ARGS__)
+#define svcmpeq_s8(...) __builtin_sve_svcmpeq_s8(__VA_ARGS__)
+#define svcmpeq_s32(...) __builtin_sve_svcmpeq_s32(__VA_ARGS__)
+#define svcmpeq_s64(...) __builtin_sve_svcmpeq_s64(__VA_ARGS__)
+#define svcmpeq_s16(...) __builtin_sve_svcmpeq_s16(__VA_ARGS__)
+#define svcmpeq_f64(...) __builtin_sve_svcmpeq_f64(__VA_ARGS__)
+#define svcmpeq_f32(...) __builtin_sve_svcmpeq_f32(__VA_ARGS__)
+#define svcmpeq_f16(...) __builtin_sve_svcmpeq_f16(__VA_ARGS__)
+#define svcmpeq_wide_n_s8(...) __builtin_sve_svcmpeq_wide_n_s8(__VA_ARGS__)
+#define svcmpeq_wide_n_s32(...) __builtin_sve_svcmpeq_wide_n_s32(__VA_ARGS__)
+#define svcmpeq_wide_n_s16(...) __builtin_sve_svcmpeq_wide_n_s16(__VA_ARGS__)
+#define svcmpeq_wide_s8(...) __builtin_sve_svcmpeq_wide_s8(__VA_ARGS__)
+#define svcmpeq_wide_s32(...) __builtin_sve_svcmpeq_wide_s32(__VA_ARGS__)
+#define svcmpeq_wide_s16(...) __builtin_sve_svcmpeq_wide_s16(__VA_ARGS__)
+#define svcmpge_n_f64(...) __builtin_sve_svcmpge_n_f64(__VA_ARGS__)
+#define svcmpge_n_f32(...) __builtin_sve_svcmpge_n_f32(__VA_ARGS__)
+#define svcmpge_n_f16(...) __builtin_sve_svcmpge_n_f16(__VA_ARGS__)
+#define svcmpge_n_s8(...) __builtin_sve_svcmpge_n_s8(__VA_ARGS__)
+#define svcmpge_n_s32(...) __builtin_sve_svcmpge_n_s32(__VA_ARGS__)
+#define svcmpge_n_s64(...) __builtin_sve_svcmpge_n_s64(__VA_ARGS__)
+#define svcmpge_n_s16(...) __builtin_sve_svcmpge_n_s16(__VA_ARGS__)
+#define svcmpge_n_u8(...) __builtin_sve_svcmpge_n_u8(__VA_ARGS__)
+#define svcmpge_n_u32(...) __builtin_sve_svcmpge_n_u32(__VA_ARGS__)
+#define svcmpge_n_u64(...) __builtin_sve_svcmpge_n_u64(__VA_ARGS__)
+#define svcmpge_n_u16(...) __builtin_sve_svcmpge_n_u16(__VA_ARGS__)
+#define svcmpge_s8(...) __builtin_sve_svcmpge_s8(__VA_ARGS__)
+#define svcmpge_s32(...) __builtin_sve_svcmpge_s32(__VA_ARGS__)
+#define svcmpge_s64(...) __builtin_sve_svcmpge_s64(__VA_ARGS__)
+#define svcmpge_s16(...) __builtin_sve_svcmpge_s16(__VA_ARGS__)
+#define svcmpge_f64(...) __builtin_sve_svcmpge_f64(__VA_ARGS__)
+#define svcmpge_f32(...) __builtin_sve_svcmpge_f32(__VA_ARGS__)
+#define svcmpge_f16(...) __builtin_sve_svcmpge_f16(__VA_ARGS__)
+#define svcmpge_u8(...) __builtin_sve_svcmpge_u8(__VA_ARGS__)
+#define svcmpge_u32(...) __builtin_sve_svcmpge_u32(__VA_ARGS__)
+#define svcmpge_u64(...) __builtin_sve_svcmpge_u64(__VA_ARGS__)
+#define svcmpge_u16(...) __builtin_sve_svcmpge_u16(__VA_ARGS__)
+#define svcmpge_wide_n_s8(...) __builtin_sve_svcmpge_wide_n_s8(__VA_ARGS__)
+#define svcmpge_wide_n_s32(...) __builtin_sve_svcmpge_wide_n_s32(__VA_ARGS__)
+#define svcmpge_wide_n_s16(...) __builtin_sve_svcmpge_wide_n_s16(__VA_ARGS__)
+#define svcmpge_wide_n_u8(...) __builtin_sve_svcmpge_wide_n_u8(__VA_ARGS__)
+#define svcmpge_wide_n_u32(...) __builtin_sve_svcmpge_wide_n_u32(__VA_ARGS__)
+#define svcmpge_wide_n_u16(...) __builtin_sve_svcmpge_wide_n_u16(__VA_ARGS__)
+#define svcmpge_wide_s8(...) __builtin_sve_svcmpge_wide_s8(__VA_ARGS__)
+#define svcmpge_wide_s32(...) __builtin_sve_svcmpge_wide_s32(__VA_ARGS__)
+#define svcmpge_wide_s16(...) __builtin_sve_svcmpge_wide_s16(__VA_ARGS__)
+#define svcmpge_wide_u8(...) __builtin_sve_svcmpge_wide_u8(__VA_ARGS__)
+#define svcmpge_wide_u32(...) __builtin_sve_svcmpge_wide_u32(__VA_ARGS__)
+#define svcmpge_wide_u16(...) __builtin_sve_svcmpge_wide_u16(__VA_ARGS__)
+#define svcmpgt_n_f64(...) __builtin_sve_svcmpgt_n_f64(__VA_ARGS__)
+#define svcmpgt_n_f32(...) __builtin_sve_svcmpgt_n_f32(__VA_ARGS__)
+#define svcmpgt_n_f16(...) __builtin_sve_svcmpgt_n_f16(__VA_ARGS__)
+#define svcmpgt_n_s8(...) __builtin_sve_svcmpgt_n_s8(__VA_ARGS__)
+#define svcmpgt_n_s32(...) __builtin_sve_svcmpgt_n_s32(__VA_ARGS__)
+#define svcmpgt_n_s64(...) __builtin_sve_svcmpgt_n_s64(__VA_ARGS__)
+#define svcmpgt_n_s16(...) __builtin_sve_svcmpgt_n_s16(__VA_ARGS__)
+#define svcmpgt_n_u8(...) __builtin_sve_svcmpgt_n_u8(__VA_ARGS__)
+#define svcmpgt_n_u32(...) __builtin_sve_svcmpgt_n_u32(__VA_ARGS__)
+#define svcmpgt_n_u64(...) __builtin_sve_svcmpgt_n_u64(__VA_ARGS__)
+#define svcmpgt_n_u16(...) __builtin_sve_svcmpgt_n_u16(__VA_ARGS__)
+#define svcmpgt_s8(...) __builtin_sve_svcmpgt_s8(__VA_ARGS__)
+#define svcmpgt_s32(...) __builtin_sve_svcmpgt_s32(__VA_ARGS__)
+#define svcmpgt_s64(...) __builtin_sve_svcmpgt_s64(__VA_ARGS__)
+#define svcmpgt_s16(...) __builtin_sve_svcmpgt_s16(__VA_ARGS__)
+#define svcmpgt_f64(...) __builtin_sve_svcmpgt_f64(__VA_ARGS__)
+#define svcmpgt_f32(...) __builtin_sve_svcmpgt_f32(__VA_ARGS__)
+#define svcmpgt_f16(...) __builtin_sve_svcmpgt_f16(__VA_ARGS__)
+#define svcmpgt_u8(...) __builtin_sve_svcmpgt_u8(__VA_ARGS__)
+#define svcmpgt_u32(...) __builtin_sve_svcmpgt_u32(__VA_ARGS__)
+#define svcmpgt_u64(...) __builtin_sve_svcmpgt_u64(__VA_ARGS__)
+#define svcmpgt_u16(...) __builtin_sve_svcmpgt_u16(__VA_ARGS__)
+#define svcmpgt_wide_n_s8(...) __builtin_sve_svcmpgt_wide_n_s8(__VA_ARGS__)
+#define svcmpgt_wide_n_s32(...) __builtin_sve_svcmpgt_wide_n_s32(__VA_ARGS__)
+#define svcmpgt_wide_n_s16(...) __builtin_sve_svcmpgt_wide_n_s16(__VA_ARGS__)
+#define svcmpgt_wide_n_u8(...) __builtin_sve_svcmpgt_wide_n_u8(__VA_ARGS__)
+#define svcmpgt_wide_n_u32(...) __builtin_sve_svcmpgt_wide_n_u32(__VA_ARGS__)
+#define svcmpgt_wide_n_u16(...) __builtin_sve_svcmpgt_wide_n_u16(__VA_ARGS__)
+#define svcmpgt_wide_s8(...) __builtin_sve_svcmpgt_wide_s8(__VA_ARGS__)
+#define svcmpgt_wide_s32(...) __builtin_sve_svcmpgt_wide_s32(__VA_ARGS__)
+#define svcmpgt_wide_s16(...) __builtin_sve_svcmpgt_wide_s16(__VA_ARGS__)
+#define svcmpgt_wide_u8(...) __builtin_sve_svcmpgt_wide_u8(__VA_ARGS__)
+#define svcmpgt_wide_u32(...) __builtin_sve_svcmpgt_wide_u32(__VA_ARGS__)
+#define svcmpgt_wide_u16(...) __builtin_sve_svcmpgt_wide_u16(__VA_ARGS__)
+#define svcmple_n_f64(...) __builtin_sve_svcmple_n_f64(__VA_ARGS__)
+#define svcmple_n_f32(...) __builtin_sve_svcmple_n_f32(__VA_ARGS__)
+#define svcmple_n_f16(...) __builtin_sve_svcmple_n_f16(__VA_ARGS__)
+#define svcmple_n_s8(...) __builtin_sve_svcmple_n_s8(__VA_ARGS__)
+#define svcmple_n_s32(...) __builtin_sve_svcmple_n_s32(__VA_ARGS__)
+#define svcmple_n_s64(...) __builtin_sve_svcmple_n_s64(__VA_ARGS__)
+#define svcmple_n_s16(...) __builtin_sve_svcmple_n_s16(__VA_ARGS__)
+#define svcmple_n_u8(...) __builtin_sve_svcmple_n_u8(__VA_ARGS__)
+#define svcmple_n_u32(...) __builtin_sve_svcmple_n_u32(__VA_ARGS__)
+#define svcmple_n_u64(...) __builtin_sve_svcmple_n_u64(__VA_ARGS__)
+#define svcmple_n_u16(...) __builtin_sve_svcmple_n_u16(__VA_ARGS__)
+#define svcmple_s8(...) __builtin_sve_svcmple_s8(__VA_ARGS__)
+#define svcmple_s32(...) __builtin_sve_svcmple_s32(__VA_ARGS__)
+#define svcmple_s64(...) __builtin_sve_svcmple_s64(__VA_ARGS__)
+#define svcmple_s16(...) __builtin_sve_svcmple_s16(__VA_ARGS__)
+#define svcmple_f64(...) __builtin_sve_svcmple_f64(__VA_ARGS__)
+#define svcmple_f32(...) __builtin_sve_svcmple_f32(__VA_ARGS__)
+#define svcmple_f16(...) __builtin_sve_svcmple_f16(__VA_ARGS__)
+#define svcmple_u8(...) __builtin_sve_svcmple_u8(__VA_ARGS__)
+#define svcmple_u32(...) __builtin_sve_svcmple_u32(__VA_ARGS__)
+#define svcmple_u64(...) __builtin_sve_svcmple_u64(__VA_ARGS__)
+#define svcmple_u16(...) __builtin_sve_svcmple_u16(__VA_ARGS__)
+#define svcmple_wide_n_s8(...) __builtin_sve_svcmple_wide_n_s8(__VA_ARGS__)
+#define svcmple_wide_n_s32(...) __builtin_sve_svcmple_wide_n_s32(__VA_ARGS__)
+#define svcmple_wide_n_s16(...) __builtin_sve_svcmple_wide_n_s16(__VA_ARGS__)
+#define svcmple_wide_n_u8(...) __builtin_sve_svcmple_wide_n_u8(__VA_ARGS__)
+#define svcmple_wide_n_u32(...) __builtin_sve_svcmple_wide_n_u32(__VA_ARGS__)
+#define svcmple_wide_n_u16(...) __builtin_sve_svcmple_wide_n_u16(__VA_ARGS__)
+#define svcmple_wide_s8(...) __builtin_sve_svcmple_wide_s8(__VA_ARGS__)
+#define svcmple_wide_s32(...) __builtin_sve_svcmple_wide_s32(__VA_ARGS__)
+#define svcmple_wide_s16(...) __builtin_sve_svcmple_wide_s16(__VA_ARGS__)
+#define svcmple_wide_u8(...) __builtin_sve_svcmple_wide_u8(__VA_ARGS__)
+#define svcmple_wide_u32(...) __builtin_sve_svcmple_wide_u32(__VA_ARGS__)
+#define svcmple_wide_u16(...) __builtin_sve_svcmple_wide_u16(__VA_ARGS__)
+#define svcmplt_n_u8(...) __builtin_sve_svcmplt_n_u8(__VA_ARGS__)
+#define svcmplt_n_u32(...) __builtin_sve_svcmplt_n_u32(__VA_ARGS__)
+#define svcmplt_n_u64(...) __builtin_sve_svcmplt_n_u64(__VA_ARGS__)
+#define svcmplt_n_u16(...) __builtin_sve_svcmplt_n_u16(__VA_ARGS__)
+#define svcmplt_n_f64(...) __builtin_sve_svcmplt_n_f64(__VA_ARGS__)
+#define svcmplt_n_f32(...) __builtin_sve_svcmplt_n_f32(__VA_ARGS__)
+#define svcmplt_n_f16(...) __builtin_sve_svcmplt_n_f16(__VA_ARGS__)
+#define svcmplt_n_s8(...) __builtin_sve_svcmplt_n_s8(__VA_ARGS__)
+#define svcmplt_n_s32(...) __builtin_sve_svcmplt_n_s32(__VA_ARGS__)
+#define svcmplt_n_s64(...) __builtin_sve_svcmplt_n_s64(__VA_ARGS__)
+#define svcmplt_n_s16(...) __builtin_sve_svcmplt_n_s16(__VA_ARGS__)
+#define svcmplt_u8(...) __builtin_sve_svcmplt_u8(__VA_ARGS__)
+#define svcmplt_u32(...) __builtin_sve_svcmplt_u32(__VA_ARGS__)
+#define svcmplt_u64(...) __builtin_sve_svcmplt_u64(__VA_ARGS__)
+#define svcmplt_u16(...) __builtin_sve_svcmplt_u16(__VA_ARGS__)
+#define svcmplt_s8(...) __builtin_sve_svcmplt_s8(__VA_ARGS__)
+#define svcmplt_s32(...) __builtin_sve_svcmplt_s32(__VA_ARGS__)
+#define svcmplt_s64(...) __builtin_sve_svcmplt_s64(__VA_ARGS__)
+#define svcmplt_s16(...) __builtin_sve_svcmplt_s16(__VA_ARGS__)
+#define svcmplt_f64(...) __builtin_sve_svcmplt_f64(__VA_ARGS__)
+#define svcmplt_f32(...) __builtin_sve_svcmplt_f32(__VA_ARGS__)
+#define svcmplt_f16(...) __builtin_sve_svcmplt_f16(__VA_ARGS__)
+#define svcmplt_wide_n_u8(...) __builtin_sve_svcmplt_wide_n_u8(__VA_ARGS__)
+#define svcmplt_wide_n_u32(...) __builtin_sve_svcmplt_wide_n_u32(__VA_ARGS__)
+#define svcmplt_wide_n_u16(...) __builtin_sve_svcmplt_wide_n_u16(__VA_ARGS__)
+#define svcmplt_wide_n_s8(...) __builtin_sve_svcmplt_wide_n_s8(__VA_ARGS__)
+#define svcmplt_wide_n_s32(...) __builtin_sve_svcmplt_wide_n_s32(__VA_ARGS__)
+#define svcmplt_wide_n_s16(...) __builtin_sve_svcmplt_wide_n_s16(__VA_ARGS__)
+#define svcmplt_wide_u8(...) __builtin_sve_svcmplt_wide_u8(__VA_ARGS__)
+#define svcmplt_wide_u32(...) __builtin_sve_svcmplt_wide_u32(__VA_ARGS__)
+#define svcmplt_wide_u16(...) __builtin_sve_svcmplt_wide_u16(__VA_ARGS__)
+#define svcmplt_wide_s8(...) __builtin_sve_svcmplt_wide_s8(__VA_ARGS__)
+#define svcmplt_wide_s32(...) __builtin_sve_svcmplt_wide_s32(__VA_ARGS__)
+#define svcmplt_wide_s16(...) __builtin_sve_svcmplt_wide_s16(__VA_ARGS__)
+#define svcmpne_n_f64(...) __builtin_sve_svcmpne_n_f64(__VA_ARGS__)
+#define svcmpne_n_f32(...) __builtin_sve_svcmpne_n_f32(__VA_ARGS__)
+#define svcmpne_n_f16(...) __builtin_sve_svcmpne_n_f16(__VA_ARGS__)
+#define svcmpne_n_u8(...) __builtin_sve_svcmpne_n_u8(__VA_ARGS__)
+#define svcmpne_n_u32(...) __builtin_sve_svcmpne_n_u32(__VA_ARGS__)
+#define svcmpne_n_u64(...) __builtin_sve_svcmpne_n_u64(__VA_ARGS__)
+#define svcmpne_n_u16(...) __builtin_sve_svcmpne_n_u16(__VA_ARGS__)
+#define svcmpne_n_s8(...) __builtin_sve_svcmpne_n_s8(__VA_ARGS__)
+#define svcmpne_n_s32(...) __builtin_sve_svcmpne_n_s32(__VA_ARGS__)
+#define svcmpne_n_s64(...) __builtin_sve_svcmpne_n_s64(__VA_ARGS__)
+#define svcmpne_n_s16(...) __builtin_sve_svcmpne_n_s16(__VA_ARGS__)
+#define svcmpne_u8(...) __builtin_sve_svcmpne_u8(__VA_ARGS__)
+#define svcmpne_u32(...) __builtin_sve_svcmpne_u32(__VA_ARGS__)
+#define svcmpne_u64(...) __builtin_sve_svcmpne_u64(__VA_ARGS__)
+#define svcmpne_u16(...) __builtin_sve_svcmpne_u16(__VA_ARGS__)
+#define svcmpne_s8(...) __builtin_sve_svcmpne_s8(__VA_ARGS__)
+#define svcmpne_s32(...) __builtin_sve_svcmpne_s32(__VA_ARGS__)
+#define svcmpne_s64(...) __builtin_sve_svcmpne_s64(__VA_ARGS__)
+#define svcmpne_s16(...) __builtin_sve_svcmpne_s16(__VA_ARGS__)
+#define svcmpne_f64(...) __builtin_sve_svcmpne_f64(__VA_ARGS__)
+#define svcmpne_f32(...) __builtin_sve_svcmpne_f32(__VA_ARGS__)
+#define svcmpne_f16(...) __builtin_sve_svcmpne_f16(__VA_ARGS__)
+#define svcmpne_wide_n_s8(...) __builtin_sve_svcmpne_wide_n_s8(__VA_ARGS__)
+#define svcmpne_wide_n_s32(...) __builtin_sve_svcmpne_wide_n_s32(__VA_ARGS__)
+#define svcmpne_wide_n_s16(...) __builtin_sve_svcmpne_wide_n_s16(__VA_ARGS__)
+#define svcmpne_wide_s8(...) __builtin_sve_svcmpne_wide_s8(__VA_ARGS__)
+#define svcmpne_wide_s32(...) __builtin_sve_svcmpne_wide_s32(__VA_ARGS__)
+#define svcmpne_wide_s16(...) __builtin_sve_svcmpne_wide_s16(__VA_ARGS__)
+#define svcmpuo_n_f64(...) __builtin_sve_svcmpuo_n_f64(__VA_ARGS__)
+#define svcmpuo_n_f32(...) __builtin_sve_svcmpuo_n_f32(__VA_ARGS__)
+#define svcmpuo_n_f16(...) __builtin_sve_svcmpuo_n_f16(__VA_ARGS__)
+#define svcmpuo_f64(...) __builtin_sve_svcmpuo_f64(__VA_ARGS__)
+#define svcmpuo_f32(...) __builtin_sve_svcmpuo_f32(__VA_ARGS__)
+#define svcmpuo_f16(...) __builtin_sve_svcmpuo_f16(__VA_ARGS__)
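+/* The svcmp* families return an svbool_t with one predicate lane per
+ * comparison; the _wide forms compare narrow elements against a vector
+ * of 64-bit elements, svcmpuo is the floating-point "unordered" test
+ * (true where either operand is NaN), and the svac* defines further
+ * above compare absolute values. Sketch, counting matches with svcntp
+ * (defined below):
+ *
+ *   svbool_t hit = svcmpgt_n_f32(pg, v, 0.0f);  // v > 0, per lane
+ *   uint64_t n   = svcntp_b32(pg, hit);         // number of true lanes
+ */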
+#define svcnot_u8_m(...) __builtin_sve_svcnot_u8_m(__VA_ARGS__)
+#define svcnot_u32_m(...) __builtin_sve_svcnot_u32_m(__VA_ARGS__)
+#define svcnot_u64_m(...) __builtin_sve_svcnot_u64_m(__VA_ARGS__)
+#define svcnot_u16_m(...) __builtin_sve_svcnot_u16_m(__VA_ARGS__)
+#define svcnot_s8_m(...) __builtin_sve_svcnot_s8_m(__VA_ARGS__)
+#define svcnot_s32_m(...) __builtin_sve_svcnot_s32_m(__VA_ARGS__)
+#define svcnot_s64_m(...) __builtin_sve_svcnot_s64_m(__VA_ARGS__)
+#define svcnot_s16_m(...) __builtin_sve_svcnot_s16_m(__VA_ARGS__)
+#define svcnot_u8_x(...) __builtin_sve_svcnot_u8_x(__VA_ARGS__)
+#define svcnot_u32_x(...) __builtin_sve_svcnot_u32_x(__VA_ARGS__)
+#define svcnot_u64_x(...) __builtin_sve_svcnot_u64_x(__VA_ARGS__)
+#define svcnot_u16_x(...) __builtin_sve_svcnot_u16_x(__VA_ARGS__)
+#define svcnot_s8_x(...) __builtin_sve_svcnot_s8_x(__VA_ARGS__)
+#define svcnot_s32_x(...) __builtin_sve_svcnot_s32_x(__VA_ARGS__)
+#define svcnot_s64_x(...) __builtin_sve_svcnot_s64_x(__VA_ARGS__)
+#define svcnot_s16_x(...) __builtin_sve_svcnot_s16_x(__VA_ARGS__)
+#define svcnot_u8_z(...) __builtin_sve_svcnot_u8_z(__VA_ARGS__)
+#define svcnot_u32_z(...) __builtin_sve_svcnot_u32_z(__VA_ARGS__)
+#define svcnot_u64_z(...) __builtin_sve_svcnot_u64_z(__VA_ARGS__)
+#define svcnot_u16_z(...) __builtin_sve_svcnot_u16_z(__VA_ARGS__)
+#define svcnot_s8_z(...) __builtin_sve_svcnot_s8_z(__VA_ARGS__)
+#define svcnot_s32_z(...) __builtin_sve_svcnot_s32_z(__VA_ARGS__)
+#define svcnot_s64_z(...) __builtin_sve_svcnot_s64_z(__VA_ARGS__)
+#define svcnot_s16_z(...) __builtin_sve_svcnot_s16_z(__VA_ARGS__)
+#define svcnt_u8_m(...) __builtin_sve_svcnt_u8_m(__VA_ARGS__)
+#define svcnt_u32_m(...) __builtin_sve_svcnt_u32_m(__VA_ARGS__)
+#define svcnt_u64_m(...) __builtin_sve_svcnt_u64_m(__VA_ARGS__)
+#define svcnt_u16_m(...) __builtin_sve_svcnt_u16_m(__VA_ARGS__)
+#define svcnt_s8_m(...) __builtin_sve_svcnt_s8_m(__VA_ARGS__)
+#define svcnt_f64_m(...) __builtin_sve_svcnt_f64_m(__VA_ARGS__)
+#define svcnt_f32_m(...) __builtin_sve_svcnt_f32_m(__VA_ARGS__)
+#define svcnt_f16_m(...) __builtin_sve_svcnt_f16_m(__VA_ARGS__)
+#define svcnt_s32_m(...) __builtin_sve_svcnt_s32_m(__VA_ARGS__)
+#define svcnt_s64_m(...) __builtin_sve_svcnt_s64_m(__VA_ARGS__)
+#define svcnt_s16_m(...) __builtin_sve_svcnt_s16_m(__VA_ARGS__)
+#define svcnt_u8_x(...) __builtin_sve_svcnt_u8_x(__VA_ARGS__)
+#define svcnt_u32_x(...) __builtin_sve_svcnt_u32_x(__VA_ARGS__)
+#define svcnt_u64_x(...) __builtin_sve_svcnt_u64_x(__VA_ARGS__)
+#define svcnt_u16_x(...) __builtin_sve_svcnt_u16_x(__VA_ARGS__)
+#define svcnt_s8_x(...) __builtin_sve_svcnt_s8_x(__VA_ARGS__)
+#define svcnt_f64_x(...) __builtin_sve_svcnt_f64_x(__VA_ARGS__)
+#define svcnt_f32_x(...) __builtin_sve_svcnt_f32_x(__VA_ARGS__)
+#define svcnt_f16_x(...) __builtin_sve_svcnt_f16_x(__VA_ARGS__)
+#define svcnt_s32_x(...) __builtin_sve_svcnt_s32_x(__VA_ARGS__)
+#define svcnt_s64_x(...) __builtin_sve_svcnt_s64_x(__VA_ARGS__)
+#define svcnt_s16_x(...) __builtin_sve_svcnt_s16_x(__VA_ARGS__)
+#define svcnt_u8_z(...) __builtin_sve_svcnt_u8_z(__VA_ARGS__)
+#define svcnt_u32_z(...) __builtin_sve_svcnt_u32_z(__VA_ARGS__)
+#define svcnt_u64_z(...) __builtin_sve_svcnt_u64_z(__VA_ARGS__)
+#define svcnt_u16_z(...) __builtin_sve_svcnt_u16_z(__VA_ARGS__)
+#define svcnt_s8_z(...) __builtin_sve_svcnt_s8_z(__VA_ARGS__)
+#define svcnt_f64_z(...) __builtin_sve_svcnt_f64_z(__VA_ARGS__)
+#define svcnt_f32_z(...) __builtin_sve_svcnt_f32_z(__VA_ARGS__)
+#define svcnt_f16_z(...) __builtin_sve_svcnt_f16_z(__VA_ARGS__)
+#define svcnt_s32_z(...) __builtin_sve_svcnt_s32_z(__VA_ARGS__)
+#define svcnt_s64_z(...) __builtin_sve_svcnt_s64_z(__VA_ARGS__)
+#define svcnt_s16_z(...) __builtin_sve_svcnt_s16_z(__VA_ARGS__)
+#define svcntb(...) __builtin_sve_svcntb(__VA_ARGS__)
+#define svcntb_pat(...) __builtin_sve_svcntb_pat(__VA_ARGS__)
+#define svcntd(...) __builtin_sve_svcntd(__VA_ARGS__)
+#define svcntd_pat(...) __builtin_sve_svcntd_pat(__VA_ARGS__)
+#define svcnth(...) __builtin_sve_svcnth(__VA_ARGS__)
+#define svcnth_pat(...) __builtin_sve_svcnth_pat(__VA_ARGS__)
+#define svcntp_b8(...) __builtin_sve_svcntp_b8(__VA_ARGS__)
+#define svcntp_b32(...) __builtin_sve_svcntp_b32(__VA_ARGS__)
+#define svcntp_b64(...) __builtin_sve_svcntp_b64(__VA_ARGS__)
+#define svcntp_b16(...) __builtin_sve_svcntp_b16(__VA_ARGS__)
+#define svcntw(...) __builtin_sve_svcntw(__VA_ARGS__)
+#define svcntw_pat(...) __builtin_sve_svcntw_pat(__VA_ARGS__)
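+/* svcntb/svcnth/svcntw/svcntd return the number of 8-, 16-, 32- and
+ * 64-bit elements in a vector, i.e. the runtime vector length; the _pat
+ * forms count only the lanes selected by an sv_pattern, and svcntp_*
+ * counts the true lanes of a predicate. A common vector-length-agnostic
+ * loop sketch (svwhilelt_b32 is assumed from elsewhere in this header):
+ *
+ *   for (uint64_t i = 0; i < n; i += svcntw()) {
+ *     svbool_t pg = svwhilelt_b32(i, n);  // masks off the tail
+ *     // ... predicated loop body ...
+ *   }
+ */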
+#define svcompact_u32(...) __builtin_sve_svcompact_u32(__VA_ARGS__)
+#define svcompact_u64(...) __builtin_sve_svcompact_u64(__VA_ARGS__)
+#define svcompact_f64(...) __builtin_sve_svcompact_f64(__VA_ARGS__)
+#define svcompact_f32(...) __builtin_sve_svcompact_f32(__VA_ARGS__)
+#define svcompact_s32(...) __builtin_sve_svcompact_s32(__VA_ARGS__)
+#define svcompact_s64(...) __builtin_sve_svcompact_s64(__VA_ARGS__)
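+/* svcompact_* copies the active elements of a vector into its
+ * lowest-numbered lanes; as the defines above show, it exists only for
+ * 32- and 64-bit element types.
+ */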
+#define svcreate2_u8(...) __builtin_sve_svcreate2_u8(__VA_ARGS__)
+#define svcreate2_u32(...) __builtin_sve_svcreate2_u32(__VA_ARGS__)
+#define svcreate2_u64(...) __builtin_sve_svcreate2_u64(__VA_ARGS__)
+#define svcreate2_u16(...) __builtin_sve_svcreate2_u16(__VA_ARGS__)
+#define svcreate2_s8(...) __builtin_sve_svcreate2_s8(__VA_ARGS__)
+#define svcreate2_f64(...) __builtin_sve_svcreate2_f64(__VA_ARGS__)
+#define svcreate2_f32(...) __builtin_sve_svcreate2_f32(__VA_ARGS__)
+#define svcreate2_f16(...) __builtin_sve_svcreate2_f16(__VA_ARGS__)
+#define svcreate2_s32(...) __builtin_sve_svcreate2_s32(__VA_ARGS__)
+#define svcreate2_s64(...) __builtin_sve_svcreate2_s64(__VA_ARGS__)
+#define svcreate2_s16(...) __builtin_sve_svcreate2_s16(__VA_ARGS__)
+#define svcreate3_u8(...) __builtin_sve_svcreate3_u8(__VA_ARGS__)
+#define svcreate3_u32(...) __builtin_sve_svcreate3_u32(__VA_ARGS__)
+#define svcreate3_u64(...) __builtin_sve_svcreate3_u64(__VA_ARGS__)
+#define svcreate3_u16(...) __builtin_sve_svcreate3_u16(__VA_ARGS__)
+#define svcreate3_s8(...) __builtin_sve_svcreate3_s8(__VA_ARGS__)
+#define svcreate3_f64(...) __builtin_sve_svcreate3_f64(__VA_ARGS__)
+#define svcreate3_f32(...) __builtin_sve_svcreate3_f32(__VA_ARGS__)
+#define svcreate3_f16(...) __builtin_sve_svcreate3_f16(__VA_ARGS__)
+#define svcreate3_s32(...) __builtin_sve_svcreate3_s32(__VA_ARGS__)
+#define svcreate3_s64(...) __builtin_sve_svcreate3_s64(__VA_ARGS__)
+#define svcreate3_s16(...) __builtin_sve_svcreate3_s16(__VA_ARGS__)
+#define svcreate4_u8(...) __builtin_sve_svcreate4_u8(__VA_ARGS__)
+#define svcreate4_u32(...) __builtin_sve_svcreate4_u32(__VA_ARGS__)
+#define svcreate4_u64(...) __builtin_sve_svcreate4_u64(__VA_ARGS__)
+#define svcreate4_u16(...) __builtin_sve_svcreate4_u16(__VA_ARGS__)
+#define svcreate4_s8(...) __builtin_sve_svcreate4_s8(__VA_ARGS__)
+#define svcreate4_f64(...) __builtin_sve_svcreate4_f64(__VA_ARGS__)
+#define svcreate4_f32(...) __builtin_sve_svcreate4_f32(__VA_ARGS__)
+#define svcreate4_f16(...) __builtin_sve_svcreate4_f16(__VA_ARGS__)
+#define svcreate4_s32(...) __builtin_sve_svcreate4_s32(__VA_ARGS__)
+#define svcreate4_s64(...) __builtin_sve_svcreate4_s64(__VA_ARGS__)
+#define svcreate4_s16(...) __builtin_sve_svcreate4_s16(__VA_ARGS__)
+#define svcvt_f16_f32_m(...) __builtin_sve_svcvt_f16_f32_m(__VA_ARGS__)
+#define svcvt_f16_f32_x(...) __builtin_sve_svcvt_f16_f32_x(__VA_ARGS__)
+#define svcvt_f16_f32_z(...) __builtin_sve_svcvt_f16_f32_z(__VA_ARGS__)
+#define svcvt_f16_f64_m(...) __builtin_sve_svcvt_f16_f64_m(__VA_ARGS__)
+#define svcvt_f16_f64_x(...) __builtin_sve_svcvt_f16_f64_x(__VA_ARGS__)
+#define svcvt_f16_f64_z(...) __builtin_sve_svcvt_f16_f64_z(__VA_ARGS__)
+#define svcvt_f16_s16_m(...) __builtin_sve_svcvt_f16_s16_m(__VA_ARGS__)
+#define svcvt_f16_s16_x(...) __builtin_sve_svcvt_f16_s16_x(__VA_ARGS__)
+#define svcvt_f16_s16_z(...) __builtin_sve_svcvt_f16_s16_z(__VA_ARGS__)
+#define svcvt_f16_s32_m(...) __builtin_sve_svcvt_f16_s32_m(__VA_ARGS__)
+#define svcvt_f16_s32_x(...) __builtin_sve_svcvt_f16_s32_x(__VA_ARGS__)
+#define svcvt_f16_s32_z(...) __builtin_sve_svcvt_f16_s32_z(__VA_ARGS__)
+#define svcvt_f16_s64_m(...) __builtin_sve_svcvt_f16_s64_m(__VA_ARGS__)
+#define svcvt_f16_s64_x(...) __builtin_sve_svcvt_f16_s64_x(__VA_ARGS__)
+#define svcvt_f16_s64_z(...) __builtin_sve_svcvt_f16_s64_z(__VA_ARGS__)
+#define svcvt_f16_u16_m(...) __builtin_sve_svcvt_f16_u16_m(__VA_ARGS__)
+#define svcvt_f16_u16_x(...) __builtin_sve_svcvt_f16_u16_x(__VA_ARGS__)
+#define svcvt_f16_u16_z(...) __builtin_sve_svcvt_f16_u16_z(__VA_ARGS__)
+#define svcvt_f16_u32_m(...) __builtin_sve_svcvt_f16_u32_m(__VA_ARGS__)
+#define svcvt_f16_u32_x(...) __builtin_sve_svcvt_f16_u32_x(__VA_ARGS__)
+#define svcvt_f16_u32_z(...) __builtin_sve_svcvt_f16_u32_z(__VA_ARGS__)
+#define svcvt_f16_u64_m(...) __builtin_sve_svcvt_f16_u64_m(__VA_ARGS__)
+#define svcvt_f16_u64_x(...) __builtin_sve_svcvt_f16_u64_x(__VA_ARGS__)
+#define svcvt_f16_u64_z(...) __builtin_sve_svcvt_f16_u64_z(__VA_ARGS__)
+#define svcvt_f32_f16_m(...) __builtin_sve_svcvt_f32_f16_m(__VA_ARGS__)
+#define svcvt_f32_f16_x(...) __builtin_sve_svcvt_f32_f16_x(__VA_ARGS__)
+#define svcvt_f32_f16_z(...) __builtin_sve_svcvt_f32_f16_z(__VA_ARGS__)
+#define svcvt_f32_f64_m(...) __builtin_sve_svcvt_f32_f64_m(__VA_ARGS__)
+#define svcvt_f32_f64_x(...) __builtin_sve_svcvt_f32_f64_x(__VA_ARGS__)
+#define svcvt_f32_f64_z(...) __builtin_sve_svcvt_f32_f64_z(__VA_ARGS__)
+#define svcvt_f32_s32_m(...) __builtin_sve_svcvt_f32_s32_m(__VA_ARGS__)
+#define svcvt_f32_s32_x(...) __builtin_sve_svcvt_f32_s32_x(__VA_ARGS__)
+#define svcvt_f32_s32_z(...) __builtin_sve_svcvt_f32_s32_z(__VA_ARGS__)
+#define svcvt_f32_s64_m(...) __builtin_sve_svcvt_f32_s64_m(__VA_ARGS__)
+#define svcvt_f32_s64_x(...) __builtin_sve_svcvt_f32_s64_x(__VA_ARGS__)
+#define svcvt_f32_s64_z(...) __builtin_sve_svcvt_f32_s64_z(__VA_ARGS__)
+#define svcvt_f32_u32_m(...) __builtin_sve_svcvt_f32_u32_m(__VA_ARGS__)
+#define svcvt_f32_u32_x(...) __builtin_sve_svcvt_f32_u32_x(__VA_ARGS__)
+#define svcvt_f32_u32_z(...) __builtin_sve_svcvt_f32_u32_z(__VA_ARGS__)
+#define svcvt_f32_u64_m(...) __builtin_sve_svcvt_f32_u64_m(__VA_ARGS__)
+#define svcvt_f32_u64_x(...) __builtin_sve_svcvt_f32_u64_x(__VA_ARGS__)
+#define svcvt_f32_u64_z(...) __builtin_sve_svcvt_f32_u64_z(__VA_ARGS__)
+#define svcvt_f64_f16_m(...) __builtin_sve_svcvt_f64_f16_m(__VA_ARGS__)
+#define svcvt_f64_f16_x(...) __builtin_sve_svcvt_f64_f16_x(__VA_ARGS__)
+#define svcvt_f64_f16_z(...) __builtin_sve_svcvt_f64_f16_z(__VA_ARGS__)
+#define svcvt_f64_f32_m(...) __builtin_sve_svcvt_f64_f32_m(__VA_ARGS__)
+#define svcvt_f64_f32_x(...) __builtin_sve_svcvt_f64_f32_x(__VA_ARGS__)
+#define svcvt_f64_f32_z(...) __builtin_sve_svcvt_f64_f32_z(__VA_ARGS__)
+#define svcvt_f64_s32_m(...) __builtin_sve_svcvt_f64_s32_m(__VA_ARGS__)
+#define svcvt_f64_s32_x(...) __builtin_sve_svcvt_f64_s32_x(__VA_ARGS__)
+#define svcvt_f64_s32_z(...) __builtin_sve_svcvt_f64_s32_z(__VA_ARGS__)
+#define svcvt_f64_s64_m(...) __builtin_sve_svcvt_f64_s64_m(__VA_ARGS__)
+#define svcvt_f64_s64_x(...) __builtin_sve_svcvt_f64_s64_x(__VA_ARGS__)
+#define svcvt_f64_s64_z(...) __builtin_sve_svcvt_f64_s64_z(__VA_ARGS__)
+#define svcvt_f64_u32_m(...) __builtin_sve_svcvt_f64_u32_m(__VA_ARGS__)
+#define svcvt_f64_u32_x(...) __builtin_sve_svcvt_f64_u32_x(__VA_ARGS__)
+#define svcvt_f64_u32_z(...) __builtin_sve_svcvt_f64_u32_z(__VA_ARGS__)
+#define svcvt_f64_u64_m(...) __builtin_sve_svcvt_f64_u64_m(__VA_ARGS__)
+#define svcvt_f64_u64_x(...) __builtin_sve_svcvt_f64_u64_x(__VA_ARGS__)
+#define svcvt_f64_u64_z(...) __builtin_sve_svcvt_f64_u64_z(__VA_ARGS__)
+#define svcvt_s16_f16_m(...) __builtin_sve_svcvt_s16_f16_m(__VA_ARGS__)
+#define svcvt_s16_f16_x(...) __builtin_sve_svcvt_s16_f16_x(__VA_ARGS__)
+#define svcvt_s16_f16_z(...) __builtin_sve_svcvt_s16_f16_z(__VA_ARGS__)
+#define svcvt_s32_f16_m(...) __builtin_sve_svcvt_s32_f16_m(__VA_ARGS__)
+#define svcvt_s32_f16_x(...) __builtin_sve_svcvt_s32_f16_x(__VA_ARGS__)
+#define svcvt_s32_f16_z(...) __builtin_sve_svcvt_s32_f16_z(__VA_ARGS__)
+#define svcvt_s32_f32_m(...) __builtin_sve_svcvt_s32_f32_m(__VA_ARGS__)
+#define svcvt_s32_f32_x(...) __builtin_sve_svcvt_s32_f32_x(__VA_ARGS__)
+#define svcvt_s32_f32_z(...) __builtin_sve_svcvt_s32_f32_z(__VA_ARGS__)
+#define svcvt_s32_f64_m(...) __builtin_sve_svcvt_s32_f64_m(__VA_ARGS__)
+#define svcvt_s32_f64_x(...) __builtin_sve_svcvt_s32_f64_x(__VA_ARGS__)
+#define svcvt_s32_f64_z(...) __builtin_sve_svcvt_s32_f64_z(__VA_ARGS__)
+#define svcvt_s64_f16_m(...) __builtin_sve_svcvt_s64_f16_m(__VA_ARGS__)
+#define svcvt_s64_f16_x(...) __builtin_sve_svcvt_s64_f16_x(__VA_ARGS__)
+#define svcvt_s64_f16_z(...) __builtin_sve_svcvt_s64_f16_z(__VA_ARGS__)
+#define svcvt_s64_f32_m(...) __builtin_sve_svcvt_s64_f32_m(__VA_ARGS__)
+#define svcvt_s64_f32_x(...) __builtin_sve_svcvt_s64_f32_x(__VA_ARGS__)
+#define svcvt_s64_f32_z(...) __builtin_sve_svcvt_s64_f32_z(__VA_ARGS__)
+#define svcvt_s64_f64_m(...) __builtin_sve_svcvt_s64_f64_m(__VA_ARGS__)
+#define svcvt_s64_f64_x(...) __builtin_sve_svcvt_s64_f64_x(__VA_ARGS__)
+#define svcvt_s64_f64_z(...) __builtin_sve_svcvt_s64_f64_z(__VA_ARGS__)
+#define svcvt_u16_f16_m(...) __builtin_sve_svcvt_u16_f16_m(__VA_ARGS__)
+#define svcvt_u16_f16_x(...) __builtin_sve_svcvt_u16_f16_x(__VA_ARGS__)
+#define svcvt_u16_f16_z(...) __builtin_sve_svcvt_u16_f16_z(__VA_ARGS__)
+#define svcvt_u32_f16_m(...) __builtin_sve_svcvt_u32_f16_m(__VA_ARGS__)
+#define svcvt_u32_f16_x(...) __builtin_sve_svcvt_u32_f16_x(__VA_ARGS__)
+#define svcvt_u32_f16_z(...) __builtin_sve_svcvt_u32_f16_z(__VA_ARGS__)
+#define svcvt_u32_f32_m(...) __builtin_sve_svcvt_u32_f32_m(__VA_ARGS__)
+#define svcvt_u32_f32_x(...) __builtin_sve_svcvt_u32_f32_x(__VA_ARGS__)
+#define svcvt_u32_f32_z(...) __builtin_sve_svcvt_u32_f32_z(__VA_ARGS__)
+#define svcvt_u32_f64_m(...) __builtin_sve_svcvt_u32_f64_m(__VA_ARGS__)
+#define svcvt_u32_f64_x(...) __builtin_sve_svcvt_u32_f64_x(__VA_ARGS__)
+#define svcvt_u32_f64_z(...) __builtin_sve_svcvt_u32_f64_z(__VA_ARGS__)
+#define svcvt_u64_f16_m(...) __builtin_sve_svcvt_u64_f16_m(__VA_ARGS__)
+#define svcvt_u64_f16_x(...) __builtin_sve_svcvt_u64_f16_x(__VA_ARGS__)
+#define svcvt_u64_f16_z(...) __builtin_sve_svcvt_u64_f16_z(__VA_ARGS__)
+#define svcvt_u64_f32_m(...) __builtin_sve_svcvt_u64_f32_m(__VA_ARGS__)
+#define svcvt_u64_f32_x(...) __builtin_sve_svcvt_u64_f32_x(__VA_ARGS__)
+#define svcvt_u64_f32_z(...) __builtin_sve_svcvt_u64_f32_z(__VA_ARGS__)
+#define svcvt_u64_f64_m(...) __builtin_sve_svcvt_u64_f64_m(__VA_ARGS__)
+#define svcvt_u64_f64_x(...) __builtin_sve_svcvt_u64_f64_x(__VA_ARGS__)
+#define svcvt_u64_f64_z(...) __builtin_sve_svcvt_u64_f64_z(__VA_ARGS__)
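+// [Editorial sketch, not part of upstream arm_sve.h] The _m/_x/_z suffixes on
+// the conversion macros above select merging, "don't-care", or zeroing
+// behaviour for lanes that are inactive under the governing predicate, e.g.:
+//   svfloat32_t f = svcvt_f32_s32_z(pg, v);    // inactive lanes become zero
+//   svfloat32_t g = svcvt_f32_s32_m(f, pg, v); // inactive lanes keep f's values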
+#define svdiv_n_f64_m(...) __builtin_sve_svdiv_n_f64_m(__VA_ARGS__)
+#define svdiv_n_f32_m(...) __builtin_sve_svdiv_n_f32_m(__VA_ARGS__)
+#define svdiv_n_f16_m(...) __builtin_sve_svdiv_n_f16_m(__VA_ARGS__)
+#define svdiv_n_f64_x(...) __builtin_sve_svdiv_n_f64_x(__VA_ARGS__)
+#define svdiv_n_f32_x(...) __builtin_sve_svdiv_n_f32_x(__VA_ARGS__)
+#define svdiv_n_f16_x(...) __builtin_sve_svdiv_n_f16_x(__VA_ARGS__)
+#define svdiv_n_f64_z(...) __builtin_sve_svdiv_n_f64_z(__VA_ARGS__)
+#define svdiv_n_f32_z(...) __builtin_sve_svdiv_n_f32_z(__VA_ARGS__)
+#define svdiv_n_f16_z(...) __builtin_sve_svdiv_n_f16_z(__VA_ARGS__)
+#define svdiv_n_s32_m(...) __builtin_sve_svdiv_n_s32_m(__VA_ARGS__)
+#define svdiv_n_s64_m(...) __builtin_sve_svdiv_n_s64_m(__VA_ARGS__)
+#define svdiv_n_s32_x(...) __builtin_sve_svdiv_n_s32_x(__VA_ARGS__)
+#define svdiv_n_s64_x(...) __builtin_sve_svdiv_n_s64_x(__VA_ARGS__)
+#define svdiv_n_s32_z(...) __builtin_sve_svdiv_n_s32_z(__VA_ARGS__)
+#define svdiv_n_s64_z(...) __builtin_sve_svdiv_n_s64_z(__VA_ARGS__)
+#define svdiv_n_u32_m(...) __builtin_sve_svdiv_n_u32_m(__VA_ARGS__)
+#define svdiv_n_u64_m(...) __builtin_sve_svdiv_n_u64_m(__VA_ARGS__)
+#define svdiv_n_u32_x(...) __builtin_sve_svdiv_n_u32_x(__VA_ARGS__)
+#define svdiv_n_u64_x(...) __builtin_sve_svdiv_n_u64_x(__VA_ARGS__)
+#define svdiv_n_u32_z(...) __builtin_sve_svdiv_n_u32_z(__VA_ARGS__)
+#define svdiv_n_u64_z(...) __builtin_sve_svdiv_n_u64_z(__VA_ARGS__)
+#define svdiv_f64_m(...) __builtin_sve_svdiv_f64_m(__VA_ARGS__)
+#define svdiv_f32_m(...) __builtin_sve_svdiv_f32_m(__VA_ARGS__)
+#define svdiv_f16_m(...) __builtin_sve_svdiv_f16_m(__VA_ARGS__)
+#define svdiv_f64_x(...) __builtin_sve_svdiv_f64_x(__VA_ARGS__)
+#define svdiv_f32_x(...) __builtin_sve_svdiv_f32_x(__VA_ARGS__)
+#define svdiv_f16_x(...) __builtin_sve_svdiv_f16_x(__VA_ARGS__)
+#define svdiv_f64_z(...) __builtin_sve_svdiv_f64_z(__VA_ARGS__)
+#define svdiv_f32_z(...) __builtin_sve_svdiv_f32_z(__VA_ARGS__)
+#define svdiv_f16_z(...) __builtin_sve_svdiv_f16_z(__VA_ARGS__)
+#define svdiv_s32_m(...) __builtin_sve_svdiv_s32_m(__VA_ARGS__)
+#define svdiv_s64_m(...) __builtin_sve_svdiv_s64_m(__VA_ARGS__)
+#define svdiv_s32_x(...) __builtin_sve_svdiv_s32_x(__VA_ARGS__)
+#define svdiv_s64_x(...) __builtin_sve_svdiv_s64_x(__VA_ARGS__)
+#define svdiv_s32_z(...) __builtin_sve_svdiv_s32_z(__VA_ARGS__)
+#define svdiv_s64_z(...) __builtin_sve_svdiv_s64_z(__VA_ARGS__)
+#define svdiv_u32_m(...) __builtin_sve_svdiv_u32_m(__VA_ARGS__)
+#define svdiv_u64_m(...) __builtin_sve_svdiv_u64_m(__VA_ARGS__)
+#define svdiv_u32_x(...) __builtin_sve_svdiv_u32_x(__VA_ARGS__)
+#define svdiv_u64_x(...) __builtin_sve_svdiv_u64_x(__VA_ARGS__)
+#define svdiv_u32_z(...) __builtin_sve_svdiv_u32_z(__VA_ARGS__)
+#define svdiv_u64_z(...) __builtin_sve_svdiv_u64_z(__VA_ARGS__)
+#define svdivr_n_f64_m(...) __builtin_sve_svdivr_n_f64_m(__VA_ARGS__)
+#define svdivr_n_f32_m(...) __builtin_sve_svdivr_n_f32_m(__VA_ARGS__)
+#define svdivr_n_f16_m(...) __builtin_sve_svdivr_n_f16_m(__VA_ARGS__)
+#define svdivr_n_f64_x(...) __builtin_sve_svdivr_n_f64_x(__VA_ARGS__)
+#define svdivr_n_f32_x(...) __builtin_sve_svdivr_n_f32_x(__VA_ARGS__)
+#define svdivr_n_f16_x(...) __builtin_sve_svdivr_n_f16_x(__VA_ARGS__)
+#define svdivr_n_f64_z(...) __builtin_sve_svdivr_n_f64_z(__VA_ARGS__)
+#define svdivr_n_f32_z(...) __builtin_sve_svdivr_n_f32_z(__VA_ARGS__)
+#define svdivr_n_f16_z(...) __builtin_sve_svdivr_n_f16_z(__VA_ARGS__)
+#define svdivr_n_s32_m(...) __builtin_sve_svdivr_n_s32_m(__VA_ARGS__)
+#define svdivr_n_s64_m(...) __builtin_sve_svdivr_n_s64_m(__VA_ARGS__)
+#define svdivr_n_s32_x(...) __builtin_sve_svdivr_n_s32_x(__VA_ARGS__)
+#define svdivr_n_s64_x(...) __builtin_sve_svdivr_n_s64_x(__VA_ARGS__)
+#define svdivr_n_s32_z(...) __builtin_sve_svdivr_n_s32_z(__VA_ARGS__)
+#define svdivr_n_s64_z(...) __builtin_sve_svdivr_n_s64_z(__VA_ARGS__)
+#define svdivr_n_u32_m(...) __builtin_sve_svdivr_n_u32_m(__VA_ARGS__)
+#define svdivr_n_u64_m(...) __builtin_sve_svdivr_n_u64_m(__VA_ARGS__)
+#define svdivr_n_u32_x(...) __builtin_sve_svdivr_n_u32_x(__VA_ARGS__)
+#define svdivr_n_u64_x(...) __builtin_sve_svdivr_n_u64_x(__VA_ARGS__)
+#define svdivr_n_u32_z(...) __builtin_sve_svdivr_n_u32_z(__VA_ARGS__)
+#define svdivr_n_u64_z(...) __builtin_sve_svdivr_n_u64_z(__VA_ARGS__)
+#define svdivr_f64_m(...) __builtin_sve_svdivr_f64_m(__VA_ARGS__)
+#define svdivr_f32_m(...) __builtin_sve_svdivr_f32_m(__VA_ARGS__)
+#define svdivr_f16_m(...) __builtin_sve_svdivr_f16_m(__VA_ARGS__)
+#define svdivr_f64_x(...) __builtin_sve_svdivr_f64_x(__VA_ARGS__)
+#define svdivr_f32_x(...) __builtin_sve_svdivr_f32_x(__VA_ARGS__)
+#define svdivr_f16_x(...) __builtin_sve_svdivr_f16_x(__VA_ARGS__)
+#define svdivr_f64_z(...) __builtin_sve_svdivr_f64_z(__VA_ARGS__)
+#define svdivr_f32_z(...) __builtin_sve_svdivr_f32_z(__VA_ARGS__)
+#define svdivr_f16_z(...) __builtin_sve_svdivr_f16_z(__VA_ARGS__)
+#define svdivr_s32_m(...) __builtin_sve_svdivr_s32_m(__VA_ARGS__)
+#define svdivr_s64_m(...) __builtin_sve_svdivr_s64_m(__VA_ARGS__)
+#define svdivr_s32_x(...) __builtin_sve_svdivr_s32_x(__VA_ARGS__)
+#define svdivr_s64_x(...) __builtin_sve_svdivr_s64_x(__VA_ARGS__)
+#define svdivr_s32_z(...) __builtin_sve_svdivr_s32_z(__VA_ARGS__)
+#define svdivr_s64_z(...) __builtin_sve_svdivr_s64_z(__VA_ARGS__)
+#define svdivr_u32_m(...) __builtin_sve_svdivr_u32_m(__VA_ARGS__)
+#define svdivr_u64_m(...) __builtin_sve_svdivr_u64_m(__VA_ARGS__)
+#define svdivr_u32_x(...) __builtin_sve_svdivr_u32_x(__VA_ARGS__)
+#define svdivr_u64_x(...) __builtin_sve_svdivr_u64_x(__VA_ARGS__)
+#define svdivr_u32_z(...) __builtin_sve_svdivr_u32_z(__VA_ARGS__)
+#define svdivr_u64_z(...) __builtin_sve_svdivr_u64_z(__VA_ARGS__)
+#define svdot_n_s32(...) __builtin_sve_svdot_n_s32(__VA_ARGS__)
+#define svdot_n_s64(...) __builtin_sve_svdot_n_s64(__VA_ARGS__)
+#define svdot_n_u32(...) __builtin_sve_svdot_n_u32(__VA_ARGS__)
+#define svdot_n_u64(...) __builtin_sve_svdot_n_u64(__VA_ARGS__)
+#define svdot_s32(...) __builtin_sve_svdot_s32(__VA_ARGS__)
+#define svdot_s64(...) __builtin_sve_svdot_s64(__VA_ARGS__)
+#define svdot_u32(...) __builtin_sve_svdot_u32(__VA_ARGS__)
+#define svdot_u64(...) __builtin_sve_svdot_u64(__VA_ARGS__)
+#define svdot_lane_s32(...) __builtin_sve_svdot_lane_s32(__VA_ARGS__)
+#define svdot_lane_s64(...) __builtin_sve_svdot_lane_s64(__VA_ARGS__)
+#define svdot_lane_u32(...) __builtin_sve_svdot_lane_u32(__VA_ARGS__)
+#define svdot_lane_u64(...) __builtin_sve_svdot_lane_u64(__VA_ARGS__)
+#define svdup_n_u8(...) __builtin_sve_svdup_n_u8(__VA_ARGS__)
+#define svdup_n_u32(...) __builtin_sve_svdup_n_u32(__VA_ARGS__)
+#define svdup_n_u64(...) __builtin_sve_svdup_n_u64(__VA_ARGS__)
+#define svdup_n_u16(...) __builtin_sve_svdup_n_u16(__VA_ARGS__)
+#define svdup_n_s8(...) __builtin_sve_svdup_n_s8(__VA_ARGS__)
+#define svdup_n_f64(...) __builtin_sve_svdup_n_f64(__VA_ARGS__)
+#define svdup_n_f32(...) __builtin_sve_svdup_n_f32(__VA_ARGS__)
+#define svdup_n_f16(...) __builtin_sve_svdup_n_f16(__VA_ARGS__)
+#define svdup_n_s32(...) __builtin_sve_svdup_n_s32(__VA_ARGS__)
+#define svdup_n_s64(...) __builtin_sve_svdup_n_s64(__VA_ARGS__)
+#define svdup_n_s16(...) __builtin_sve_svdup_n_s16(__VA_ARGS__)
+#define svdup_n_u8_m(...) __builtin_sve_svdup_n_u8_m(__VA_ARGS__)
+#define svdup_n_u32_m(...) __builtin_sve_svdup_n_u32_m(__VA_ARGS__)
+#define svdup_n_u64_m(...) __builtin_sve_svdup_n_u64_m(__VA_ARGS__)
+#define svdup_n_u16_m(...) __builtin_sve_svdup_n_u16_m(__VA_ARGS__)
+#define svdup_n_s8_m(...) __builtin_sve_svdup_n_s8_m(__VA_ARGS__)
+#define svdup_n_f64_m(...) __builtin_sve_svdup_n_f64_m(__VA_ARGS__)
+#define svdup_n_f32_m(...) __builtin_sve_svdup_n_f32_m(__VA_ARGS__)
+#define svdup_n_f16_m(...) __builtin_sve_svdup_n_f16_m(__VA_ARGS__)
+#define svdup_n_s32_m(...) __builtin_sve_svdup_n_s32_m(__VA_ARGS__)
+#define svdup_n_s64_m(...) __builtin_sve_svdup_n_s64_m(__VA_ARGS__)
+#define svdup_n_s16_m(...) __builtin_sve_svdup_n_s16_m(__VA_ARGS__)
+#define svdup_n_b8(...) __builtin_sve_svdup_n_b8(__VA_ARGS__)
+#define svdup_n_b32(...) __builtin_sve_svdup_n_b32(__VA_ARGS__)
+#define svdup_n_b64(...) __builtin_sve_svdup_n_b64(__VA_ARGS__)
+#define svdup_n_b16(...) __builtin_sve_svdup_n_b16(__VA_ARGS__)
+#define svdup_n_u8_x(...) __builtin_sve_svdup_n_u8_x(__VA_ARGS__)
+#define svdup_n_u32_x(...) __builtin_sve_svdup_n_u32_x(__VA_ARGS__)
+#define svdup_n_u64_x(...) __builtin_sve_svdup_n_u64_x(__VA_ARGS__)
+#define svdup_n_u16_x(...) __builtin_sve_svdup_n_u16_x(__VA_ARGS__)
+#define svdup_n_s8_x(...) __builtin_sve_svdup_n_s8_x(__VA_ARGS__)
+#define svdup_n_f64_x(...) __builtin_sve_svdup_n_f64_x(__VA_ARGS__)
+#define svdup_n_f32_x(...) __builtin_sve_svdup_n_f32_x(__VA_ARGS__)
+#define svdup_n_f16_x(...) __builtin_sve_svdup_n_f16_x(__VA_ARGS__)
+#define svdup_n_s32_x(...) __builtin_sve_svdup_n_s32_x(__VA_ARGS__)
+#define svdup_n_s64_x(...) __builtin_sve_svdup_n_s64_x(__VA_ARGS__)
+#define svdup_n_s16_x(...) __builtin_sve_svdup_n_s16_x(__VA_ARGS__)
+#define svdup_n_u8_z(...) __builtin_sve_svdup_n_u8_z(__VA_ARGS__)
+#define svdup_n_u32_z(...) __builtin_sve_svdup_n_u32_z(__VA_ARGS__)
+#define svdup_n_u64_z(...) __builtin_sve_svdup_n_u64_z(__VA_ARGS__)
+#define svdup_n_u16_z(...) __builtin_sve_svdup_n_u16_z(__VA_ARGS__)
+#define svdup_n_s8_z(...) __builtin_sve_svdup_n_s8_z(__VA_ARGS__)
+#define svdup_n_f64_z(...) __builtin_sve_svdup_n_f64_z(__VA_ARGS__)
+#define svdup_n_f32_z(...) __builtin_sve_svdup_n_f32_z(__VA_ARGS__)
+#define svdup_n_f16_z(...) __builtin_sve_svdup_n_f16_z(__VA_ARGS__)
+#define svdup_n_s32_z(...) __builtin_sve_svdup_n_s32_z(__VA_ARGS__)
+#define svdup_n_s64_z(...) __builtin_sve_svdup_n_s64_z(__VA_ARGS__)
+#define svdup_n_s16_z(...) __builtin_sve_svdup_n_s16_z(__VA_ARGS__)
+#define svdup_lane_u8(...) __builtin_sve_svdup_lane_u8(__VA_ARGS__)
+#define svdup_lane_u32(...) __builtin_sve_svdup_lane_u32(__VA_ARGS__)
+#define svdup_lane_u64(...) __builtin_sve_svdup_lane_u64(__VA_ARGS__)
+#define svdup_lane_u16(...) __builtin_sve_svdup_lane_u16(__VA_ARGS__)
+#define svdup_lane_s8(...) __builtin_sve_svdup_lane_s8(__VA_ARGS__)
+#define svdup_lane_f64(...) __builtin_sve_svdup_lane_f64(__VA_ARGS__)
+#define svdup_lane_f32(...) __builtin_sve_svdup_lane_f32(__VA_ARGS__)
+#define svdup_lane_f16(...) __builtin_sve_svdup_lane_f16(__VA_ARGS__)
+#define svdup_lane_s32(...) __builtin_sve_svdup_lane_s32(__VA_ARGS__)
+#define svdup_lane_s64(...) __builtin_sve_svdup_lane_s64(__VA_ARGS__)
+#define svdup_lane_s16(...) __builtin_sve_svdup_lane_s16(__VA_ARGS__)
+#define svdupq_n_u16(...) __builtin_sve_svdupq_n_u16(__VA_ARGS__)
+#define svdupq_n_f16(...) __builtin_sve_svdupq_n_f16(__VA_ARGS__)
+#define svdupq_n_s16(...) __builtin_sve_svdupq_n_s16(__VA_ARGS__)
+#define svdupq_n_u32(...) __builtin_sve_svdupq_n_u32(__VA_ARGS__)
+#define svdupq_n_f32(...) __builtin_sve_svdupq_n_f32(__VA_ARGS__)
+#define svdupq_n_s32(...) __builtin_sve_svdupq_n_s32(__VA_ARGS__)
+#define svdupq_n_u64(...) __builtin_sve_svdupq_n_u64(__VA_ARGS__)
+#define svdupq_n_f64(...) __builtin_sve_svdupq_n_f64(__VA_ARGS__)
+#define svdupq_n_s64(...) __builtin_sve_svdupq_n_s64(__VA_ARGS__)
+#define svdupq_n_u8(...) __builtin_sve_svdupq_n_u8(__VA_ARGS__)
+#define svdupq_n_s8(...) __builtin_sve_svdupq_n_s8(__VA_ARGS__)
+#define svdupq_n_b16(...) __builtin_sve_svdupq_n_b16(__VA_ARGS__)
+#define svdupq_n_b32(...) __builtin_sve_svdupq_n_b32(__VA_ARGS__)
+#define svdupq_n_b64(...) __builtin_sve_svdupq_n_b64(__VA_ARGS__)
+#define svdupq_n_b8(...) __builtin_sve_svdupq_n_b8(__VA_ARGS__)
+#define svdupq_lane_u8(...) __builtin_sve_svdupq_lane_u8(__VA_ARGS__)
+#define svdupq_lane_u32(...) __builtin_sve_svdupq_lane_u32(__VA_ARGS__)
+#define svdupq_lane_u64(...) __builtin_sve_svdupq_lane_u64(__VA_ARGS__)
+#define svdupq_lane_u16(...) __builtin_sve_svdupq_lane_u16(__VA_ARGS__)
+#define svdupq_lane_s8(...) __builtin_sve_svdupq_lane_s8(__VA_ARGS__)
+#define svdupq_lane_f64(...) __builtin_sve_svdupq_lane_f64(__VA_ARGS__)
+#define svdupq_lane_f32(...) __builtin_sve_svdupq_lane_f32(__VA_ARGS__)
+#define svdupq_lane_f16(...) __builtin_sve_svdupq_lane_f16(__VA_ARGS__)
+#define svdupq_lane_s32(...) __builtin_sve_svdupq_lane_s32(__VA_ARGS__)
+#define svdupq_lane_s64(...) __builtin_sve_svdupq_lane_s64(__VA_ARGS__)
+#define svdupq_lane_s16(...) __builtin_sve_svdupq_lane_s16(__VA_ARGS__)
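+// [Editorial sketch, not part of upstream arm_sve.h] Each macro expands
+// literally to its builtin; the svdup_n_* family broadcasts a scalar to
+// every lane of a vector:
+//   svint32_t v = svdup_n_s32(42);  // == __builtin_sve_svdup_n_s32(42)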
+#define sveor_b_z(...) __builtin_sve_sveor_b_z(__VA_ARGS__)
+#define sveor_n_u8_m(...) __builtin_sve_sveor_n_u8_m(__VA_ARGS__)
+#define sveor_n_u32_m(...) __builtin_sve_sveor_n_u32_m(__VA_ARGS__)
+#define sveor_n_u64_m(...) __builtin_sve_sveor_n_u64_m(__VA_ARGS__)
+#define sveor_n_u16_m(...) __builtin_sve_sveor_n_u16_m(__VA_ARGS__)
+#define sveor_n_s8_m(...) __builtin_sve_sveor_n_s8_m(__VA_ARGS__)
+#define sveor_n_s32_m(...) __builtin_sve_sveor_n_s32_m(__VA_ARGS__)
+#define sveor_n_s64_m(...) __builtin_sve_sveor_n_s64_m(__VA_ARGS__)
+#define sveor_n_s16_m(...) __builtin_sve_sveor_n_s16_m(__VA_ARGS__)
+#define sveor_n_u8_x(...) __builtin_sve_sveor_n_u8_x(__VA_ARGS__)
+#define sveor_n_u32_x(...) __builtin_sve_sveor_n_u32_x(__VA_ARGS__)
+#define sveor_n_u64_x(...) __builtin_sve_sveor_n_u64_x(__VA_ARGS__)
+#define sveor_n_u16_x(...) __builtin_sve_sveor_n_u16_x(__VA_ARGS__)
+#define sveor_n_s8_x(...) __builtin_sve_sveor_n_s8_x(__VA_ARGS__)
+#define sveor_n_s32_x(...) __builtin_sve_sveor_n_s32_x(__VA_ARGS__)
+#define sveor_n_s64_x(...) __builtin_sve_sveor_n_s64_x(__VA_ARGS__)
+#define sveor_n_s16_x(...) __builtin_sve_sveor_n_s16_x(__VA_ARGS__)
+#define sveor_n_u8_z(...) __builtin_sve_sveor_n_u8_z(__VA_ARGS__)
+#define sveor_n_u32_z(...) __builtin_sve_sveor_n_u32_z(__VA_ARGS__)
+#define sveor_n_u64_z(...) __builtin_sve_sveor_n_u64_z(__VA_ARGS__)
+#define sveor_n_u16_z(...) __builtin_sve_sveor_n_u16_z(__VA_ARGS__)
+#define sveor_n_s8_z(...) __builtin_sve_sveor_n_s8_z(__VA_ARGS__)
+#define sveor_n_s32_z(...) __builtin_sve_sveor_n_s32_z(__VA_ARGS__)
+#define sveor_n_s64_z(...) __builtin_sve_sveor_n_s64_z(__VA_ARGS__)
+#define sveor_n_s16_z(...) __builtin_sve_sveor_n_s16_z(__VA_ARGS__)
+#define sveor_u8_m(...) __builtin_sve_sveor_u8_m(__VA_ARGS__)
+#define sveor_u32_m(...) __builtin_sve_sveor_u32_m(__VA_ARGS__)
+#define sveor_u64_m(...) __builtin_sve_sveor_u64_m(__VA_ARGS__)
+#define sveor_u16_m(...) __builtin_sve_sveor_u16_m(__VA_ARGS__)
+#define sveor_s8_m(...) __builtin_sve_sveor_s8_m(__VA_ARGS__)
+#define sveor_s32_m(...) __builtin_sve_sveor_s32_m(__VA_ARGS__)
+#define sveor_s64_m(...) __builtin_sve_sveor_s64_m(__VA_ARGS__)
+#define sveor_s16_m(...) __builtin_sve_sveor_s16_m(__VA_ARGS__)
+#define sveor_u8_x(...) __builtin_sve_sveor_u8_x(__VA_ARGS__)
+#define sveor_u32_x(...) __builtin_sve_sveor_u32_x(__VA_ARGS__)
+#define sveor_u64_x(...) __builtin_sve_sveor_u64_x(__VA_ARGS__)
+#define sveor_u16_x(...) __builtin_sve_sveor_u16_x(__VA_ARGS__)
+#define sveor_s8_x(...) __builtin_sve_sveor_s8_x(__VA_ARGS__)
+#define sveor_s32_x(...) __builtin_sve_sveor_s32_x(__VA_ARGS__)
+#define sveor_s64_x(...) __builtin_sve_sveor_s64_x(__VA_ARGS__)
+#define sveor_s16_x(...) __builtin_sve_sveor_s16_x(__VA_ARGS__)
+#define sveor_u8_z(...) __builtin_sve_sveor_u8_z(__VA_ARGS__)
+#define sveor_u32_z(...) __builtin_sve_sveor_u32_z(__VA_ARGS__)
+#define sveor_u64_z(...) __builtin_sve_sveor_u64_z(__VA_ARGS__)
+#define sveor_u16_z(...) __builtin_sve_sveor_u16_z(__VA_ARGS__)
+#define sveor_s8_z(...) __builtin_sve_sveor_s8_z(__VA_ARGS__)
+#define sveor_s32_z(...) __builtin_sve_sveor_s32_z(__VA_ARGS__)
+#define sveor_s64_z(...) __builtin_sve_sveor_s64_z(__VA_ARGS__)
+#define sveor_s16_z(...) __builtin_sve_sveor_s16_z(__VA_ARGS__)
+#define sveorv_u8(...) __builtin_sve_sveorv_u8(__VA_ARGS__)
+#define sveorv_u32(...) __builtin_sve_sveorv_u32(__VA_ARGS__)
+#define sveorv_u64(...) __builtin_sve_sveorv_u64(__VA_ARGS__)
+#define sveorv_u16(...) __builtin_sve_sveorv_u16(__VA_ARGS__)
+#define sveorv_s8(...) __builtin_sve_sveorv_s8(__VA_ARGS__)
+#define sveorv_s32(...) __builtin_sve_sveorv_s32(__VA_ARGS__)
+#define sveorv_s64(...) __builtin_sve_sveorv_s64(__VA_ARGS__)
+#define sveorv_s16(...) __builtin_sve_sveorv_s16(__VA_ARGS__)
+#define svexpa_f64(...) __builtin_sve_svexpa_f64(__VA_ARGS__)
+#define svexpa_f32(...) __builtin_sve_svexpa_f32(__VA_ARGS__)
+#define svexpa_f16(...) __builtin_sve_svexpa_f16(__VA_ARGS__)
+#define svext_u8(...) __builtin_sve_svext_u8(__VA_ARGS__)
+#define svext_u32(...) __builtin_sve_svext_u32(__VA_ARGS__)
+#define svext_u64(...) __builtin_sve_svext_u64(__VA_ARGS__)
+#define svext_u16(...) __builtin_sve_svext_u16(__VA_ARGS__)
+#define svext_s8(...) __builtin_sve_svext_s8(__VA_ARGS__)
+#define svext_f64(...) __builtin_sve_svext_f64(__VA_ARGS__)
+#define svext_f32(...) __builtin_sve_svext_f32(__VA_ARGS__)
+#define svext_f16(...) __builtin_sve_svext_f16(__VA_ARGS__)
+#define svext_s32(...) __builtin_sve_svext_s32(__VA_ARGS__)
+#define svext_s64(...) __builtin_sve_svext_s64(__VA_ARGS__)
+#define svext_s16(...) __builtin_sve_svext_s16(__VA_ARGS__)
+#define svextb_s32_m(...) __builtin_sve_svextb_s32_m(__VA_ARGS__)
+#define svextb_s64_m(...) __builtin_sve_svextb_s64_m(__VA_ARGS__)
+#define svextb_s16_m(...) __builtin_sve_svextb_s16_m(__VA_ARGS__)
+#define svextb_s32_x(...) __builtin_sve_svextb_s32_x(__VA_ARGS__)
+#define svextb_s64_x(...) __builtin_sve_svextb_s64_x(__VA_ARGS__)
+#define svextb_s16_x(...) __builtin_sve_svextb_s16_x(__VA_ARGS__)
+#define svextb_s32_z(...) __builtin_sve_svextb_s32_z(__VA_ARGS__)
+#define svextb_s64_z(...) __builtin_sve_svextb_s64_z(__VA_ARGS__)
+#define svextb_s16_z(...) __builtin_sve_svextb_s16_z(__VA_ARGS__)
+#define svextb_u32_m(...) __builtin_sve_svextb_u32_m(__VA_ARGS__)
+#define svextb_u64_m(...) __builtin_sve_svextb_u64_m(__VA_ARGS__)
+#define svextb_u16_m(...) __builtin_sve_svextb_u16_m(__VA_ARGS__)
+#define svextb_u32_x(...) __builtin_sve_svextb_u32_x(__VA_ARGS__)
+#define svextb_u64_x(...) __builtin_sve_svextb_u64_x(__VA_ARGS__)
+#define svextb_u16_x(...) __builtin_sve_svextb_u16_x(__VA_ARGS__)
+#define svextb_u32_z(...) __builtin_sve_svextb_u32_z(__VA_ARGS__)
+#define svextb_u64_z(...) __builtin_sve_svextb_u64_z(__VA_ARGS__)
+#define svextb_u16_z(...) __builtin_sve_svextb_u16_z(__VA_ARGS__)
+#define svexth_s32_m(...) __builtin_sve_svexth_s32_m(__VA_ARGS__)
+#define svexth_s64_m(...) __builtin_sve_svexth_s64_m(__VA_ARGS__)
+#define svexth_s32_x(...) __builtin_sve_svexth_s32_x(__VA_ARGS__)
+#define svexth_s64_x(...) __builtin_sve_svexth_s64_x(__VA_ARGS__)
+#define svexth_s32_z(...) __builtin_sve_svexth_s32_z(__VA_ARGS__)
+#define svexth_s64_z(...) __builtin_sve_svexth_s64_z(__VA_ARGS__)
+#define svexth_u32_m(...) __builtin_sve_svexth_u32_m(__VA_ARGS__)
+#define svexth_u64_m(...) __builtin_sve_svexth_u64_m(__VA_ARGS__)
+#define svexth_u32_x(...) __builtin_sve_svexth_u32_x(__VA_ARGS__)
+#define svexth_u64_x(...) __builtin_sve_svexth_u64_x(__VA_ARGS__)
+#define svexth_u32_z(...) __builtin_sve_svexth_u32_z(__VA_ARGS__)
+#define svexth_u64_z(...) __builtin_sve_svexth_u64_z(__VA_ARGS__)
+#define svextw_s64_m(...) __builtin_sve_svextw_s64_m(__VA_ARGS__)
+#define svextw_s64_x(...) __builtin_sve_svextw_s64_x(__VA_ARGS__)
+#define svextw_s64_z(...) __builtin_sve_svextw_s64_z(__VA_ARGS__)
+#define svextw_u64_m(...) __builtin_sve_svextw_u64_m(__VA_ARGS__)
+#define svextw_u64_x(...) __builtin_sve_svextw_u64_x(__VA_ARGS__)
+#define svextw_u64_z(...) __builtin_sve_svextw_u64_z(__VA_ARGS__)
+#define svget2_u8(...) __builtin_sve_svget2_u8(__VA_ARGS__)
+#define svget2_u32(...) __builtin_sve_svget2_u32(__VA_ARGS__)
+#define svget2_u64(...) __builtin_sve_svget2_u64(__VA_ARGS__)
+#define svget2_u16(...) __builtin_sve_svget2_u16(__VA_ARGS__)
+#define svget2_s8(...) __builtin_sve_svget2_s8(__VA_ARGS__)
+#define svget2_f64(...) __builtin_sve_svget2_f64(__VA_ARGS__)
+#define svget2_f32(...) __builtin_sve_svget2_f32(__VA_ARGS__)
+#define svget2_f16(...) __builtin_sve_svget2_f16(__VA_ARGS__)
+#define svget2_s32(...) __builtin_sve_svget2_s32(__VA_ARGS__)
+#define svget2_s64(...) __builtin_sve_svget2_s64(__VA_ARGS__)
+#define svget2_s16(...) __builtin_sve_svget2_s16(__VA_ARGS__)
+#define svget3_u8(...) __builtin_sve_svget3_u8(__VA_ARGS__)
+#define svget3_u32(...) __builtin_sve_svget3_u32(__VA_ARGS__)
+#define svget3_u64(...) __builtin_sve_svget3_u64(__VA_ARGS__)
+#define svget3_u16(...) __builtin_sve_svget3_u16(__VA_ARGS__)
+#define svget3_s8(...) __builtin_sve_svget3_s8(__VA_ARGS__)
+#define svget3_f64(...) __builtin_sve_svget3_f64(__VA_ARGS__)
+#define svget3_f32(...) __builtin_sve_svget3_f32(__VA_ARGS__)
+#define svget3_f16(...) __builtin_sve_svget3_f16(__VA_ARGS__)
+#define svget3_s32(...) __builtin_sve_svget3_s32(__VA_ARGS__)
+#define svget3_s64(...) __builtin_sve_svget3_s64(__VA_ARGS__)
+#define svget3_s16(...) __builtin_sve_svget3_s16(__VA_ARGS__)
+#define svget4_u8(...) __builtin_sve_svget4_u8(__VA_ARGS__)
+#define svget4_u32(...) __builtin_sve_svget4_u32(__VA_ARGS__)
+#define svget4_u64(...) __builtin_sve_svget4_u64(__VA_ARGS__)
+#define svget4_u16(...) __builtin_sve_svget4_u16(__VA_ARGS__)
+#define svget4_s8(...) __builtin_sve_svget4_s8(__VA_ARGS__)
+#define svget4_f64(...) __builtin_sve_svget4_f64(__VA_ARGS__)
+#define svget4_f32(...) __builtin_sve_svget4_f32(__VA_ARGS__)
+#define svget4_f16(...) __builtin_sve_svget4_f16(__VA_ARGS__)
+#define svget4_s32(...) __builtin_sve_svget4_s32(__VA_ARGS__)
+#define svget4_s64(...) __builtin_sve_svget4_s64(__VA_ARGS__)
+#define svget4_s16(...) __builtin_sve_svget4_s16(__VA_ARGS__)
+#define svindex_u8(...) __builtin_sve_svindex_u8(__VA_ARGS__)
+#define svindex_u32(...) __builtin_sve_svindex_u32(__VA_ARGS__)
+#define svindex_u64(...) __builtin_sve_svindex_u64(__VA_ARGS__)
+#define svindex_u16(...) __builtin_sve_svindex_u16(__VA_ARGS__)
+#define svindex_s8(...) __builtin_sve_svindex_s8(__VA_ARGS__)
+#define svindex_s32(...) __builtin_sve_svindex_s32(__VA_ARGS__)
+#define svindex_s64(...) __builtin_sve_svindex_s64(__VA_ARGS__)
+#define svindex_s16(...) __builtin_sve_svindex_s16(__VA_ARGS__)
+#define svinsr_n_u8(...) __builtin_sve_svinsr_n_u8(__VA_ARGS__)
+#define svinsr_n_u32(...) __builtin_sve_svinsr_n_u32(__VA_ARGS__)
+#define svinsr_n_u64(...) __builtin_sve_svinsr_n_u64(__VA_ARGS__)
+#define svinsr_n_u16(...) __builtin_sve_svinsr_n_u16(__VA_ARGS__)
+#define svinsr_n_s8(...) __builtin_sve_svinsr_n_s8(__VA_ARGS__)
+#define svinsr_n_f64(...) __builtin_sve_svinsr_n_f64(__VA_ARGS__)
+#define svinsr_n_f32(...) __builtin_sve_svinsr_n_f32(__VA_ARGS__)
+#define svinsr_n_f16(...) __builtin_sve_svinsr_n_f16(__VA_ARGS__)
+#define svinsr_n_s32(...) __builtin_sve_svinsr_n_s32(__VA_ARGS__)
+#define svinsr_n_s64(...) __builtin_sve_svinsr_n_s64(__VA_ARGS__)
+#define svinsr_n_s16(...) __builtin_sve_svinsr_n_s16(__VA_ARGS__)
+#define svlasta_u8(...) __builtin_sve_svlasta_u8(__VA_ARGS__)
+#define svlasta_u32(...) __builtin_sve_svlasta_u32(__VA_ARGS__)
+#define svlasta_u64(...) __builtin_sve_svlasta_u64(__VA_ARGS__)
+#define svlasta_u16(...) __builtin_sve_svlasta_u16(__VA_ARGS__)
+#define svlasta_s8(...) __builtin_sve_svlasta_s8(__VA_ARGS__)
+#define svlasta_f64(...) __builtin_sve_svlasta_f64(__VA_ARGS__)
+#define svlasta_f32(...) __builtin_sve_svlasta_f32(__VA_ARGS__)
+#define svlasta_f16(...) __builtin_sve_svlasta_f16(__VA_ARGS__)
+#define svlasta_s32(...) __builtin_sve_svlasta_s32(__VA_ARGS__)
+#define svlasta_s64(...) __builtin_sve_svlasta_s64(__VA_ARGS__)
+#define svlasta_s16(...) __builtin_sve_svlasta_s16(__VA_ARGS__)
+#define svlastb_u8(...) __builtin_sve_svlastb_u8(__VA_ARGS__)
+#define svlastb_u32(...) __builtin_sve_svlastb_u32(__VA_ARGS__)
+#define svlastb_u64(...) __builtin_sve_svlastb_u64(__VA_ARGS__)
+#define svlastb_u16(...) __builtin_sve_svlastb_u16(__VA_ARGS__)
+#define svlastb_s8(...) __builtin_sve_svlastb_s8(__VA_ARGS__)
+#define svlastb_f64(...) __builtin_sve_svlastb_f64(__VA_ARGS__)
+#define svlastb_f32(...) __builtin_sve_svlastb_f32(__VA_ARGS__)
+#define svlastb_f16(...) __builtin_sve_svlastb_f16(__VA_ARGS__)
+#define svlastb_s32(...) __builtin_sve_svlastb_s32(__VA_ARGS__)
+#define svlastb_s64(...) __builtin_sve_svlastb_s64(__VA_ARGS__)
+#define svlastb_s16(...) __builtin_sve_svlastb_s16(__VA_ARGS__)
+#define svld1_u8(...) __builtin_sve_svld1_u8(__VA_ARGS__)
+#define svld1_u32(...) __builtin_sve_svld1_u32(__VA_ARGS__)
+#define svld1_u64(...) __builtin_sve_svld1_u64(__VA_ARGS__)
+#define svld1_u16(...) __builtin_sve_svld1_u16(__VA_ARGS__)
+#define svld1_s8(...) __builtin_sve_svld1_s8(__VA_ARGS__)
+#define svld1_f64(...) __builtin_sve_svld1_f64(__VA_ARGS__)
+#define svld1_f32(...) __builtin_sve_svld1_f32(__VA_ARGS__)
+#define svld1_f16(...) __builtin_sve_svld1_f16(__VA_ARGS__)
+#define svld1_s32(...) __builtin_sve_svld1_s32(__VA_ARGS__)
+#define svld1_s64(...) __builtin_sve_svld1_s64(__VA_ARGS__)
+#define svld1_s16(...) __builtin_sve_svld1_s16(__VA_ARGS__)
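+// [Editorial sketch, not part of upstream arm_sve.h] svld1_* performs a
+// predicated contiguous load; lanes inactive in pg read as zero:
+//   svbool_t    pg = svptrue_b32();       // all 32-bit lanes active
+//   svfloat32_t v  = svld1_f32(pg, src);  // src: const float32_t *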
+#define svld1_gather_u32base_index_u32(...) __builtin_sve_svld1_gather_u32base_index_u32(__VA_ARGS__)
+#define svld1_gather_u64base_index_u64(...) __builtin_sve_svld1_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1_gather_u64base_index_f64(...) __builtin_sve_svld1_gather_u64base_index_f64(__VA_ARGS__)
+#define svld1_gather_u32base_index_f32(...) __builtin_sve_svld1_gather_u32base_index_f32(__VA_ARGS__)
+#define svld1_gather_u32base_index_s32(...) __builtin_sve_svld1_gather_u32base_index_s32(__VA_ARGS__)
+#define svld1_gather_u64base_index_s64(...) __builtin_sve_svld1_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1_gather_u32base_offset_u32(...) __builtin_sve_svld1_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1_gather_u64base_offset_u64(...) __builtin_sve_svld1_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1_gather_u64base_offset_f64(...) __builtin_sve_svld1_gather_u64base_offset_f64(__VA_ARGS__)
+#define svld1_gather_u32base_offset_f32(...) __builtin_sve_svld1_gather_u32base_offset_f32(__VA_ARGS__)
+#define svld1_gather_u32base_offset_s32(...) __builtin_sve_svld1_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1_gather_u64base_offset_s64(...) __builtin_sve_svld1_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1_gather_u32base_u32(...) __builtin_sve_svld1_gather_u32base_u32(__VA_ARGS__)
+#define svld1_gather_u64base_u64(...) __builtin_sve_svld1_gather_u64base_u64(__VA_ARGS__)
+#define svld1_gather_u64base_f64(...) __builtin_sve_svld1_gather_u64base_f64(__VA_ARGS__)
+#define svld1_gather_u32base_f32(...) __builtin_sve_svld1_gather_u32base_f32(__VA_ARGS__)
+#define svld1_gather_u32base_s32(...) __builtin_sve_svld1_gather_u32base_s32(__VA_ARGS__)
+#define svld1_gather_u64base_s64(...) __builtin_sve_svld1_gather_u64base_s64(__VA_ARGS__)
+#define svld1_gather_s32index_u32(...) __builtin_sve_svld1_gather_s32index_u32(__VA_ARGS__)
+#define svld1_gather_s32index_f32(...) __builtin_sve_svld1_gather_s32index_f32(__VA_ARGS__)
+#define svld1_gather_s32index_s32(...) __builtin_sve_svld1_gather_s32index_s32(__VA_ARGS__)
+#define svld1_gather_u32index_u32(...) __builtin_sve_svld1_gather_u32index_u32(__VA_ARGS__)
+#define svld1_gather_u32index_f32(...) __builtin_sve_svld1_gather_u32index_f32(__VA_ARGS__)
+#define svld1_gather_u32index_s32(...) __builtin_sve_svld1_gather_u32index_s32(__VA_ARGS__)
+#define svld1_gather_s64index_u64(...) __builtin_sve_svld1_gather_s64index_u64(__VA_ARGS__)
+#define svld1_gather_s64index_f64(...) __builtin_sve_svld1_gather_s64index_f64(__VA_ARGS__)
+#define svld1_gather_s64index_s64(...) __builtin_sve_svld1_gather_s64index_s64(__VA_ARGS__)
+#define svld1_gather_u64index_u64(...) __builtin_sve_svld1_gather_u64index_u64(__VA_ARGS__)
+#define svld1_gather_u64index_f64(...) __builtin_sve_svld1_gather_u64index_f64(__VA_ARGS__)
+#define svld1_gather_u64index_s64(...) __builtin_sve_svld1_gather_u64index_s64(__VA_ARGS__)
+#define svld1_gather_s32offset_u32(...) __builtin_sve_svld1_gather_s32offset_u32(__VA_ARGS__)
+#define svld1_gather_s32offset_f32(...) __builtin_sve_svld1_gather_s32offset_f32(__VA_ARGS__)
+#define svld1_gather_s32offset_s32(...) __builtin_sve_svld1_gather_s32offset_s32(__VA_ARGS__)
+#define svld1_gather_u32offset_u32(...) __builtin_sve_svld1_gather_u32offset_u32(__VA_ARGS__)
+#define svld1_gather_u32offset_f32(...) __builtin_sve_svld1_gather_u32offset_f32(__VA_ARGS__)
+#define svld1_gather_u32offset_s32(...) __builtin_sve_svld1_gather_u32offset_s32(__VA_ARGS__)
+#define svld1_gather_s64offset_u64(...) __builtin_sve_svld1_gather_s64offset_u64(__VA_ARGS__)
+#define svld1_gather_s64offset_f64(...) __builtin_sve_svld1_gather_s64offset_f64(__VA_ARGS__)
+#define svld1_gather_s64offset_s64(...) __builtin_sve_svld1_gather_s64offset_s64(__VA_ARGS__)
+#define svld1_gather_u64offset_u64(...) __builtin_sve_svld1_gather_u64offset_u64(__VA_ARGS__)
+#define svld1_gather_u64offset_f64(...) __builtin_sve_svld1_gather_u64offset_f64(__VA_ARGS__)
+#define svld1_gather_u64offset_s64(...) __builtin_sve_svld1_gather_u64offset_s64(__VA_ARGS__)
+#define svld1_vnum_u8(...) __builtin_sve_svld1_vnum_u8(__VA_ARGS__)
+#define svld1_vnum_u32(...) __builtin_sve_svld1_vnum_u32(__VA_ARGS__)
+#define svld1_vnum_u64(...) __builtin_sve_svld1_vnum_u64(__VA_ARGS__)
+#define svld1_vnum_u16(...) __builtin_sve_svld1_vnum_u16(__VA_ARGS__)
+#define svld1_vnum_s8(...) __builtin_sve_svld1_vnum_s8(__VA_ARGS__)
+#define svld1_vnum_f64(...) __builtin_sve_svld1_vnum_f64(__VA_ARGS__)
+#define svld1_vnum_f32(...) __builtin_sve_svld1_vnum_f32(__VA_ARGS__)
+#define svld1_vnum_f16(...) __builtin_sve_svld1_vnum_f16(__VA_ARGS__)
+#define svld1_vnum_s32(...) __builtin_sve_svld1_vnum_s32(__VA_ARGS__)
+#define svld1_vnum_s64(...) __builtin_sve_svld1_vnum_s64(__VA_ARGS__)
+#define svld1_vnum_s16(...) __builtin_sve_svld1_vnum_s16(__VA_ARGS__)
+#define svld1rq_u8(...) __builtin_sve_svld1rq_u8(__VA_ARGS__)
+#define svld1rq_u32(...) __builtin_sve_svld1rq_u32(__VA_ARGS__)
+#define svld1rq_u64(...) __builtin_sve_svld1rq_u64(__VA_ARGS__)
+#define svld1rq_u16(...) __builtin_sve_svld1rq_u16(__VA_ARGS__)
+#define svld1rq_s8(...) __builtin_sve_svld1rq_s8(__VA_ARGS__)
+#define svld1rq_f64(...) __builtin_sve_svld1rq_f64(__VA_ARGS__)
+#define svld1rq_f32(...) __builtin_sve_svld1rq_f32(__VA_ARGS__)
+#define svld1rq_f16(...) __builtin_sve_svld1rq_f16(__VA_ARGS__)
+#define svld1rq_s32(...) __builtin_sve_svld1rq_s32(__VA_ARGS__)
+#define svld1rq_s64(...) __builtin_sve_svld1rq_s64(__VA_ARGS__)
+#define svld1rq_s16(...) __builtin_sve_svld1rq_s16(__VA_ARGS__)
+#define svld1sb_gather_u32base_offset_u32(...) __builtin_sve_svld1sb_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1sb_gather_u64base_offset_u64(...) __builtin_sve_svld1sb_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1sb_gather_u32base_offset_s32(...) __builtin_sve_svld1sb_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1sb_gather_u64base_offset_s64(...) __builtin_sve_svld1sb_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1sb_gather_u32base_u32(...) __builtin_sve_svld1sb_gather_u32base_u32(__VA_ARGS__)
+#define svld1sb_gather_u64base_u64(...) __builtin_sve_svld1sb_gather_u64base_u64(__VA_ARGS__)
+#define svld1sb_gather_u32base_s32(...) __builtin_sve_svld1sb_gather_u32base_s32(__VA_ARGS__)
+#define svld1sb_gather_u64base_s64(...) __builtin_sve_svld1sb_gather_u64base_s64(__VA_ARGS__)
+#define svld1sb_gather_s32offset_u32(...) __builtin_sve_svld1sb_gather_s32offset_u32(__VA_ARGS__)
+#define svld1sb_gather_s32offset_s32(...) __builtin_sve_svld1sb_gather_s32offset_s32(__VA_ARGS__)
+#define svld1sb_gather_u32offset_u32(...) __builtin_sve_svld1sb_gather_u32offset_u32(__VA_ARGS__)
+#define svld1sb_gather_u32offset_s32(...) __builtin_sve_svld1sb_gather_u32offset_s32(__VA_ARGS__)
+#define svld1sb_gather_s64offset_u64(...) __builtin_sve_svld1sb_gather_s64offset_u64(__VA_ARGS__)
+#define svld1sb_gather_s64offset_s64(...) __builtin_sve_svld1sb_gather_s64offset_s64(__VA_ARGS__)
+#define svld1sb_gather_u64offset_u64(...) __builtin_sve_svld1sb_gather_u64offset_u64(__VA_ARGS__)
+#define svld1sb_gather_u64offset_s64(...) __builtin_sve_svld1sb_gather_u64offset_s64(__VA_ARGS__)
+#define svld1sb_vnum_u32(...) __builtin_sve_svld1sb_vnum_u32(__VA_ARGS__)
+#define svld1sb_vnum_u64(...) __builtin_sve_svld1sb_vnum_u64(__VA_ARGS__)
+#define svld1sb_vnum_u16(...) __builtin_sve_svld1sb_vnum_u16(__VA_ARGS__)
+#define svld1sb_vnum_s32(...) __builtin_sve_svld1sb_vnum_s32(__VA_ARGS__)
+#define svld1sb_vnum_s64(...) __builtin_sve_svld1sb_vnum_s64(__VA_ARGS__)
+#define svld1sb_vnum_s16(...) __builtin_sve_svld1sb_vnum_s16(__VA_ARGS__)
+#define svld1sb_u32(...) __builtin_sve_svld1sb_u32(__VA_ARGS__)
+#define svld1sb_u64(...) __builtin_sve_svld1sb_u64(__VA_ARGS__)
+#define svld1sb_u16(...) __builtin_sve_svld1sb_u16(__VA_ARGS__)
+#define svld1sb_s32(...) __builtin_sve_svld1sb_s32(__VA_ARGS__)
+#define svld1sb_s64(...) __builtin_sve_svld1sb_s64(__VA_ARGS__)
+#define svld1sb_s16(...) __builtin_sve_svld1sb_s16(__VA_ARGS__)
+#define svld1sh_gather_u32base_index_u32(...) __builtin_sve_svld1sh_gather_u32base_index_u32(__VA_ARGS__)
+#define svld1sh_gather_u64base_index_u64(...) __builtin_sve_svld1sh_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1sh_gather_u32base_index_s32(...) __builtin_sve_svld1sh_gather_u32base_index_s32(__VA_ARGS__)
+#define svld1sh_gather_u64base_index_s64(...) __builtin_sve_svld1sh_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1sh_gather_u32base_offset_u32(...) __builtin_sve_svld1sh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1sh_gather_u64base_offset_u64(...) __builtin_sve_svld1sh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1sh_gather_u32base_offset_s32(...) __builtin_sve_svld1sh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1sh_gather_u64base_offset_s64(...) __builtin_sve_svld1sh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1sh_gather_u32base_u32(...) __builtin_sve_svld1sh_gather_u32base_u32(__VA_ARGS__)
+#define svld1sh_gather_u64base_u64(...) __builtin_sve_svld1sh_gather_u64base_u64(__VA_ARGS__)
+#define svld1sh_gather_u32base_s32(...) __builtin_sve_svld1sh_gather_u32base_s32(__VA_ARGS__)
+#define svld1sh_gather_u64base_s64(...) __builtin_sve_svld1sh_gather_u64base_s64(__VA_ARGS__)
+#define svld1sh_gather_s32index_u32(...) __builtin_sve_svld1sh_gather_s32index_u32(__VA_ARGS__)
+#define svld1sh_gather_s32index_s32(...) __builtin_sve_svld1sh_gather_s32index_s32(__VA_ARGS__)
+#define svld1sh_gather_u32index_u32(...) __builtin_sve_svld1sh_gather_u32index_u32(__VA_ARGS__)
+#define svld1sh_gather_u32index_s32(...) __builtin_sve_svld1sh_gather_u32index_s32(__VA_ARGS__)
+#define svld1sh_gather_s64index_u64(...) __builtin_sve_svld1sh_gather_s64index_u64(__VA_ARGS__)
+#define svld1sh_gather_s64index_s64(...) __builtin_sve_svld1sh_gather_s64index_s64(__VA_ARGS__)
+#define svld1sh_gather_u64index_u64(...) __builtin_sve_svld1sh_gather_u64index_u64(__VA_ARGS__)
+#define svld1sh_gather_u64index_s64(...) __builtin_sve_svld1sh_gather_u64index_s64(__VA_ARGS__)
+#define svld1sh_gather_s32offset_u32(...) __builtin_sve_svld1sh_gather_s32offset_u32(__VA_ARGS__)
+#define svld1sh_gather_s32offset_s32(...) __builtin_sve_svld1sh_gather_s32offset_s32(__VA_ARGS__)
+#define svld1sh_gather_u32offset_u32(...) __builtin_sve_svld1sh_gather_u32offset_u32(__VA_ARGS__)
+#define svld1sh_gather_u32offset_s32(...) __builtin_sve_svld1sh_gather_u32offset_s32(__VA_ARGS__)
+#define svld1sh_gather_s64offset_u64(...) __builtin_sve_svld1sh_gather_s64offset_u64(__VA_ARGS__)
+#define svld1sh_gather_s64offset_s64(...) __builtin_sve_svld1sh_gather_s64offset_s64(__VA_ARGS__)
+#define svld1sh_gather_u64offset_u64(...) __builtin_sve_svld1sh_gather_u64offset_u64(__VA_ARGS__)
+#define svld1sh_gather_u64offset_s64(...) __builtin_sve_svld1sh_gather_u64offset_s64(__VA_ARGS__)
+#define svld1sh_vnum_u32(...) __builtin_sve_svld1sh_vnum_u32(__VA_ARGS__)
+#define svld1sh_vnum_u64(...) __builtin_sve_svld1sh_vnum_u64(__VA_ARGS__)
+#define svld1sh_vnum_s32(...) __builtin_sve_svld1sh_vnum_s32(__VA_ARGS__)
+#define svld1sh_vnum_s64(...) __builtin_sve_svld1sh_vnum_s64(__VA_ARGS__)
+#define svld1sh_u32(...) __builtin_sve_svld1sh_u32(__VA_ARGS__)
+#define svld1sh_u64(...) __builtin_sve_svld1sh_u64(__VA_ARGS__)
+#define svld1sh_s32(...) __builtin_sve_svld1sh_s32(__VA_ARGS__)
+#define svld1sh_s64(...) __builtin_sve_svld1sh_s64(__VA_ARGS__)
+#define svld1sw_gather_u64base_index_u64(...) __builtin_sve_svld1sw_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1sw_gather_u64base_index_s64(...) __builtin_sve_svld1sw_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1sw_gather_u64base_offset_u64(...) __builtin_sve_svld1sw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1sw_gather_u64base_offset_s64(...) __builtin_sve_svld1sw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1sw_gather_u64base_u64(...) __builtin_sve_svld1sw_gather_u64base_u64(__VA_ARGS__)
+#define svld1sw_gather_u64base_s64(...) __builtin_sve_svld1sw_gather_u64base_s64(__VA_ARGS__)
+#define svld1sw_gather_s64index_u64(...) __builtin_sve_svld1sw_gather_s64index_u64(__VA_ARGS__)
+#define svld1sw_gather_s64index_s64(...) __builtin_sve_svld1sw_gather_s64index_s64(__VA_ARGS__)
+#define svld1sw_gather_u64index_u64(...) __builtin_sve_svld1sw_gather_u64index_u64(__VA_ARGS__)
+#define svld1sw_gather_u64index_s64(...) __builtin_sve_svld1sw_gather_u64index_s64(__VA_ARGS__)
+#define svld1sw_gather_s64offset_u64(...) __builtin_sve_svld1sw_gather_s64offset_u64(__VA_ARGS__)
+#define svld1sw_gather_s64offset_s64(...) __builtin_sve_svld1sw_gather_s64offset_s64(__VA_ARGS__)
+#define svld1sw_gather_u64offset_u64(...) __builtin_sve_svld1sw_gather_u64offset_u64(__VA_ARGS__)
+#define svld1sw_gather_u64offset_s64(...) __builtin_sve_svld1sw_gather_u64offset_s64(__VA_ARGS__)
+#define svld1sw_vnum_u64(...) __builtin_sve_svld1sw_vnum_u64(__VA_ARGS__)
+#define svld1sw_vnum_s64(...) __builtin_sve_svld1sw_vnum_s64(__VA_ARGS__)
+#define svld1sw_u64(...) __builtin_sve_svld1sw_u64(__VA_ARGS__)
+#define svld1sw_s64(...) __builtin_sve_svld1sw_s64(__VA_ARGS__)
+#define svld1ub_gather_u32base_offset_u32(...) __builtin_sve_svld1ub_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1ub_gather_u64base_offset_u64(...) __builtin_sve_svld1ub_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1ub_gather_u32base_offset_s32(...) __builtin_sve_svld1ub_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1ub_gather_u64base_offset_s64(...) __builtin_sve_svld1ub_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1ub_gather_u32base_u32(...) __builtin_sve_svld1ub_gather_u32base_u32(__VA_ARGS__)
+#define svld1ub_gather_u64base_u64(...) __builtin_sve_svld1ub_gather_u64base_u64(__VA_ARGS__)
+#define svld1ub_gather_u32base_s32(...) __builtin_sve_svld1ub_gather_u32base_s32(__VA_ARGS__)
+#define svld1ub_gather_u64base_s64(...) __builtin_sve_svld1ub_gather_u64base_s64(__VA_ARGS__)
+#define svld1ub_gather_s32offset_u32(...) __builtin_sve_svld1ub_gather_s32offset_u32(__VA_ARGS__)
+#define svld1ub_gather_s32offset_s32(...) __builtin_sve_svld1ub_gather_s32offset_s32(__VA_ARGS__)
+#define svld1ub_gather_u32offset_u32(...) __builtin_sve_svld1ub_gather_u32offset_u32(__VA_ARGS__)
+#define svld1ub_gather_u32offset_s32(...) __builtin_sve_svld1ub_gather_u32offset_s32(__VA_ARGS__)
+#define svld1ub_gather_s64offset_u64(...) __builtin_sve_svld1ub_gather_s64offset_u64(__VA_ARGS__)
+#define svld1ub_gather_s64offset_s64(...) __builtin_sve_svld1ub_gather_s64offset_s64(__VA_ARGS__)
+#define svld1ub_gather_u64offset_u64(...) __builtin_sve_svld1ub_gather_u64offset_u64(__VA_ARGS__)
+#define svld1ub_gather_u64offset_s64(...) __builtin_sve_svld1ub_gather_u64offset_s64(__VA_ARGS__)
+#define svld1ub_vnum_u32(...) __builtin_sve_svld1ub_vnum_u32(__VA_ARGS__)
+#define svld1ub_vnum_u64(...) __builtin_sve_svld1ub_vnum_u64(__VA_ARGS__)
+#define svld1ub_vnum_u16(...) __builtin_sve_svld1ub_vnum_u16(__VA_ARGS__)
+#define svld1ub_vnum_s32(...) __builtin_sve_svld1ub_vnum_s32(__VA_ARGS__)
+#define svld1ub_vnum_s64(...) __builtin_sve_svld1ub_vnum_s64(__VA_ARGS__)
+#define svld1ub_vnum_s16(...) __builtin_sve_svld1ub_vnum_s16(__VA_ARGS__)
+#define svld1ub_u32(...) __builtin_sve_svld1ub_u32(__VA_ARGS__)
+#define svld1ub_u64(...) __builtin_sve_svld1ub_u64(__VA_ARGS__)
+#define svld1ub_u16(...) __builtin_sve_svld1ub_u16(__VA_ARGS__)
+#define svld1ub_s32(...) __builtin_sve_svld1ub_s32(__VA_ARGS__)
+#define svld1ub_s64(...) __builtin_sve_svld1ub_s64(__VA_ARGS__)
+#define svld1ub_s16(...) __builtin_sve_svld1ub_s16(__VA_ARGS__)
+#define svld1uh_gather_u32base_index_u32(...) __builtin_sve_svld1uh_gather_u32base_index_u32(__VA_ARGS__)
+#define svld1uh_gather_u64base_index_u64(...) __builtin_sve_svld1uh_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1uh_gather_u32base_index_s32(...) __builtin_sve_svld1uh_gather_u32base_index_s32(__VA_ARGS__)
+#define svld1uh_gather_u64base_index_s64(...) __builtin_sve_svld1uh_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1uh_gather_u32base_offset_u32(...) __builtin_sve_svld1uh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svld1uh_gather_u64base_offset_u64(...) __builtin_sve_svld1uh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1uh_gather_u32base_offset_s32(...) __builtin_sve_svld1uh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svld1uh_gather_u64base_offset_s64(...) __builtin_sve_svld1uh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1uh_gather_u32base_u32(...) __builtin_sve_svld1uh_gather_u32base_u32(__VA_ARGS__)
+#define svld1uh_gather_u64base_u64(...) __builtin_sve_svld1uh_gather_u64base_u64(__VA_ARGS__)
+#define svld1uh_gather_u32base_s32(...) __builtin_sve_svld1uh_gather_u32base_s32(__VA_ARGS__)
+#define svld1uh_gather_u64base_s64(...) __builtin_sve_svld1uh_gather_u64base_s64(__VA_ARGS__)
+#define svld1uh_gather_s32index_u32(...) __builtin_sve_svld1uh_gather_s32index_u32(__VA_ARGS__)
+#define svld1uh_gather_s32index_s32(...) __builtin_sve_svld1uh_gather_s32index_s32(__VA_ARGS__)
+#define svld1uh_gather_u32index_u32(...) __builtin_sve_svld1uh_gather_u32index_u32(__VA_ARGS__)
+#define svld1uh_gather_u32index_s32(...) __builtin_sve_svld1uh_gather_u32index_s32(__VA_ARGS__)
+#define svld1uh_gather_s64index_u64(...) __builtin_sve_svld1uh_gather_s64index_u64(__VA_ARGS__)
+#define svld1uh_gather_s64index_s64(...) __builtin_sve_svld1uh_gather_s64index_s64(__VA_ARGS__)
+#define svld1uh_gather_u64index_u64(...) __builtin_sve_svld1uh_gather_u64index_u64(__VA_ARGS__)
+#define svld1uh_gather_u64index_s64(...) __builtin_sve_svld1uh_gather_u64index_s64(__VA_ARGS__)
+#define svld1uh_gather_s32offset_u32(...) __builtin_sve_svld1uh_gather_s32offset_u32(__VA_ARGS__)
+#define svld1uh_gather_s32offset_s32(...) __builtin_sve_svld1uh_gather_s32offset_s32(__VA_ARGS__)
+#define svld1uh_gather_u32offset_u32(...) __builtin_sve_svld1uh_gather_u32offset_u32(__VA_ARGS__)
+#define svld1uh_gather_u32offset_s32(...) __builtin_sve_svld1uh_gather_u32offset_s32(__VA_ARGS__)
+#define svld1uh_gather_s64offset_u64(...) __builtin_sve_svld1uh_gather_s64offset_u64(__VA_ARGS__)
+#define svld1uh_gather_s64offset_s64(...) __builtin_sve_svld1uh_gather_s64offset_s64(__VA_ARGS__)
+#define svld1uh_gather_u64offset_u64(...) __builtin_sve_svld1uh_gather_u64offset_u64(__VA_ARGS__)
+#define svld1uh_gather_u64offset_s64(...) __builtin_sve_svld1uh_gather_u64offset_s64(__VA_ARGS__)
+#define svld1uh_vnum_u32(...) __builtin_sve_svld1uh_vnum_u32(__VA_ARGS__)
+#define svld1uh_vnum_u64(...) __builtin_sve_svld1uh_vnum_u64(__VA_ARGS__)
+#define svld1uh_vnum_s32(...) __builtin_sve_svld1uh_vnum_s32(__VA_ARGS__)
+#define svld1uh_vnum_s64(...) __builtin_sve_svld1uh_vnum_s64(__VA_ARGS__)
+#define svld1uh_u32(...) __builtin_sve_svld1uh_u32(__VA_ARGS__)
+#define svld1uh_u64(...) __builtin_sve_svld1uh_u64(__VA_ARGS__)
+#define svld1uh_s32(...) __builtin_sve_svld1uh_s32(__VA_ARGS__)
+#define svld1uh_s64(...) __builtin_sve_svld1uh_s64(__VA_ARGS__)
+#define svld1uw_gather_u64base_index_u64(...) __builtin_sve_svld1uw_gather_u64base_index_u64(__VA_ARGS__)
+#define svld1uw_gather_u64base_index_s64(...) __builtin_sve_svld1uw_gather_u64base_index_s64(__VA_ARGS__)
+#define svld1uw_gather_u64base_offset_u64(...) __builtin_sve_svld1uw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svld1uw_gather_u64base_offset_s64(...) __builtin_sve_svld1uw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svld1uw_gather_u64base_u64(...) __builtin_sve_svld1uw_gather_u64base_u64(__VA_ARGS__)
+#define svld1uw_gather_u64base_s64(...) __builtin_sve_svld1uw_gather_u64base_s64(__VA_ARGS__)
+#define svld1uw_gather_s64index_u64(...) __builtin_sve_svld1uw_gather_s64index_u64(__VA_ARGS__)
+#define svld1uw_gather_s64index_s64(...) __builtin_sve_svld1uw_gather_s64index_s64(__VA_ARGS__)
+#define svld1uw_gather_u64index_u64(...) __builtin_sve_svld1uw_gather_u64index_u64(__VA_ARGS__)
+#define svld1uw_gather_u64index_s64(...) __builtin_sve_svld1uw_gather_u64index_s64(__VA_ARGS__)
+#define svld1uw_gather_s64offset_u64(...) __builtin_sve_svld1uw_gather_s64offset_u64(__VA_ARGS__)
+#define svld1uw_gather_s64offset_s64(...) __builtin_sve_svld1uw_gather_s64offset_s64(__VA_ARGS__)
+#define svld1uw_gather_u64offset_u64(...) __builtin_sve_svld1uw_gather_u64offset_u64(__VA_ARGS__)
+#define svld1uw_gather_u64offset_s64(...) __builtin_sve_svld1uw_gather_u64offset_s64(__VA_ARGS__)
+#define svld1uw_vnum_u64(...) __builtin_sve_svld1uw_vnum_u64(__VA_ARGS__)
+#define svld1uw_vnum_s64(...) __builtin_sve_svld1uw_vnum_s64(__VA_ARGS__)
+#define svld1uw_u64(...) __builtin_sve_svld1uw_u64(__VA_ARGS__)
+#define svld1uw_s64(...) __builtin_sve_svld1uw_s64(__VA_ARGS__)
+#define svld2_u8(...) __builtin_sve_svld2_u8(__VA_ARGS__)
+#define svld2_u32(...) __builtin_sve_svld2_u32(__VA_ARGS__)
+#define svld2_u64(...) __builtin_sve_svld2_u64(__VA_ARGS__)
+#define svld2_u16(...) __builtin_sve_svld2_u16(__VA_ARGS__)
+#define svld2_s8(...) __builtin_sve_svld2_s8(__VA_ARGS__)
+#define svld2_f64(...) __builtin_sve_svld2_f64(__VA_ARGS__)
+#define svld2_f32(...) __builtin_sve_svld2_f32(__VA_ARGS__)
+#define svld2_f16(...) __builtin_sve_svld2_f16(__VA_ARGS__)
+#define svld2_s32(...) __builtin_sve_svld2_s32(__VA_ARGS__)
+#define svld2_s64(...) __builtin_sve_svld2_s64(__VA_ARGS__)
+#define svld2_s16(...) __builtin_sve_svld2_s16(__VA_ARGS__)
+#define svld2_vnum_u8(...) __builtin_sve_svld2_vnum_u8(__VA_ARGS__)
+#define svld2_vnum_u32(...) __builtin_sve_svld2_vnum_u32(__VA_ARGS__)
+#define svld2_vnum_u64(...) __builtin_sve_svld2_vnum_u64(__VA_ARGS__)
+#define svld2_vnum_u16(...) __builtin_sve_svld2_vnum_u16(__VA_ARGS__)
+#define svld2_vnum_s8(...) __builtin_sve_svld2_vnum_s8(__VA_ARGS__)
+#define svld2_vnum_f64(...) __builtin_sve_svld2_vnum_f64(__VA_ARGS__)
+#define svld2_vnum_f32(...) __builtin_sve_svld2_vnum_f32(__VA_ARGS__)
+#define svld2_vnum_f16(...) __builtin_sve_svld2_vnum_f16(__VA_ARGS__)
+#define svld2_vnum_s32(...) __builtin_sve_svld2_vnum_s32(__VA_ARGS__)
+#define svld2_vnum_s64(...) __builtin_sve_svld2_vnum_s64(__VA_ARGS__)
+#define svld2_vnum_s16(...) __builtin_sve_svld2_vnum_s16(__VA_ARGS__)
+#define svld3_u8(...) __builtin_sve_svld3_u8(__VA_ARGS__)
+#define svld3_u32(...) __builtin_sve_svld3_u32(__VA_ARGS__)
+#define svld3_u64(...) __builtin_sve_svld3_u64(__VA_ARGS__)
+#define svld3_u16(...) __builtin_sve_svld3_u16(__VA_ARGS__)
+#define svld3_s8(...) __builtin_sve_svld3_s8(__VA_ARGS__)
+#define svld3_f64(...) __builtin_sve_svld3_f64(__VA_ARGS__)
+#define svld3_f32(...) __builtin_sve_svld3_f32(__VA_ARGS__)
+#define svld3_f16(...) __builtin_sve_svld3_f16(__VA_ARGS__)
+#define svld3_s32(...) __builtin_sve_svld3_s32(__VA_ARGS__)
+#define svld3_s64(...) __builtin_sve_svld3_s64(__VA_ARGS__)
+#define svld3_s16(...) __builtin_sve_svld3_s16(__VA_ARGS__)
+#define svld3_vnum_u8(...) __builtin_sve_svld3_vnum_u8(__VA_ARGS__)
+#define svld3_vnum_u32(...) __builtin_sve_svld3_vnum_u32(__VA_ARGS__)
+#define svld3_vnum_u64(...) __builtin_sve_svld3_vnum_u64(__VA_ARGS__)
+#define svld3_vnum_u16(...) __builtin_sve_svld3_vnum_u16(__VA_ARGS__)
+#define svld3_vnum_s8(...) __builtin_sve_svld3_vnum_s8(__VA_ARGS__)
+#define svld3_vnum_f64(...) __builtin_sve_svld3_vnum_f64(__VA_ARGS__)
+#define svld3_vnum_f32(...) __builtin_sve_svld3_vnum_f32(__VA_ARGS__)
+#define svld3_vnum_f16(...) __builtin_sve_svld3_vnum_f16(__VA_ARGS__)
+#define svld3_vnum_s32(...) __builtin_sve_svld3_vnum_s32(__VA_ARGS__)
+#define svld3_vnum_s64(...) __builtin_sve_svld3_vnum_s64(__VA_ARGS__)
+#define svld3_vnum_s16(...) __builtin_sve_svld3_vnum_s16(__VA_ARGS__)
+#define svld4_u8(...) __builtin_sve_svld4_u8(__VA_ARGS__)
+#define svld4_u32(...) __builtin_sve_svld4_u32(__VA_ARGS__)
+#define svld4_u64(...) __builtin_sve_svld4_u64(__VA_ARGS__)
+#define svld4_u16(...) __builtin_sve_svld4_u16(__VA_ARGS__)
+#define svld4_s8(...) __builtin_sve_svld4_s8(__VA_ARGS__)
+#define svld4_f64(...) __builtin_sve_svld4_f64(__VA_ARGS__)
+#define svld4_f32(...) __builtin_sve_svld4_f32(__VA_ARGS__)
+#define svld4_f16(...) __builtin_sve_svld4_f16(__VA_ARGS__)
+#define svld4_s32(...) __builtin_sve_svld4_s32(__VA_ARGS__)
+#define svld4_s64(...) __builtin_sve_svld4_s64(__VA_ARGS__)
+#define svld4_s16(...) __builtin_sve_svld4_s16(__VA_ARGS__)
+#define svld4_vnum_u8(...) __builtin_sve_svld4_vnum_u8(__VA_ARGS__)
+#define svld4_vnum_u32(...) __builtin_sve_svld4_vnum_u32(__VA_ARGS__)
+#define svld4_vnum_u64(...) __builtin_sve_svld4_vnum_u64(__VA_ARGS__)
+#define svld4_vnum_u16(...) __builtin_sve_svld4_vnum_u16(__VA_ARGS__)
+#define svld4_vnum_s8(...) __builtin_sve_svld4_vnum_s8(__VA_ARGS__)
+#define svld4_vnum_f64(...) __builtin_sve_svld4_vnum_f64(__VA_ARGS__)
+#define svld4_vnum_f32(...) __builtin_sve_svld4_vnum_f32(__VA_ARGS__)
+#define svld4_vnum_f16(...) __builtin_sve_svld4_vnum_f16(__VA_ARGS__)
+#define svld4_vnum_s32(...) __builtin_sve_svld4_vnum_s32(__VA_ARGS__)
+#define svld4_vnum_s64(...) __builtin_sve_svld4_vnum_s64(__VA_ARGS__)
+#define svld4_vnum_s16(...) __builtin_sve_svld4_vnum_s16(__VA_ARGS__)
+#define svldff1_u8(...) __builtin_sve_svldff1_u8(__VA_ARGS__)
+#define svldff1_u32(...) __builtin_sve_svldff1_u32(__VA_ARGS__)
+#define svldff1_u64(...) __builtin_sve_svldff1_u64(__VA_ARGS__)
+#define svldff1_u16(...) __builtin_sve_svldff1_u16(__VA_ARGS__)
+#define svldff1_s8(...) __builtin_sve_svldff1_s8(__VA_ARGS__)
+#define svldff1_f64(...) __builtin_sve_svldff1_f64(__VA_ARGS__)
+#define svldff1_f32(...) __builtin_sve_svldff1_f32(__VA_ARGS__)
+#define svldff1_f16(...) __builtin_sve_svldff1_f16(__VA_ARGS__)
+#define svldff1_s32(...) __builtin_sve_svldff1_s32(__VA_ARGS__)
+#define svldff1_s64(...) __builtin_sve_svldff1_s64(__VA_ARGS__)
+#define svldff1_s16(...) __builtin_sve_svldff1_s16(__VA_ARGS__)
+#define svldff1_gather_u32base_index_u32(...) __builtin_sve_svldff1_gather_u32base_index_u32(__VA_ARGS__)
+#define svldff1_gather_u64base_index_u64(...) __builtin_sve_svldff1_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1_gather_u64base_index_f64(...) __builtin_sve_svldff1_gather_u64base_index_f64(__VA_ARGS__)
+#define svldff1_gather_u32base_index_f32(...) __builtin_sve_svldff1_gather_u32base_index_f32(__VA_ARGS__)
+#define svldff1_gather_u32base_index_s32(...) __builtin_sve_svldff1_gather_u32base_index_s32(__VA_ARGS__)
+#define svldff1_gather_u64base_index_s64(...) __builtin_sve_svldff1_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1_gather_u32base_offset_u32(...) __builtin_sve_svldff1_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1_gather_u64base_offset_u64(...) __builtin_sve_svldff1_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1_gather_u64base_offset_f64(...) __builtin_sve_svldff1_gather_u64base_offset_f64(__VA_ARGS__)
+#define svldff1_gather_u32base_offset_f32(...) __builtin_sve_svldff1_gather_u32base_offset_f32(__VA_ARGS__)
+#define svldff1_gather_u32base_offset_s32(...) __builtin_sve_svldff1_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1_gather_u64base_offset_s64(...) __builtin_sve_svldff1_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1_gather_u32base_u32(...) __builtin_sve_svldff1_gather_u32base_u32(__VA_ARGS__)
+#define svldff1_gather_u64base_u64(...) __builtin_sve_svldff1_gather_u64base_u64(__VA_ARGS__)
+#define svldff1_gather_u64base_f64(...) __builtin_sve_svldff1_gather_u64base_f64(__VA_ARGS__)
+#define svldff1_gather_u32base_f32(...) __builtin_sve_svldff1_gather_u32base_f32(__VA_ARGS__)
+#define svldff1_gather_u32base_s32(...) __builtin_sve_svldff1_gather_u32base_s32(__VA_ARGS__)
+#define svldff1_gather_u64base_s64(...) __builtin_sve_svldff1_gather_u64base_s64(__VA_ARGS__)
+#define svldff1_gather_s32index_u32(...) __builtin_sve_svldff1_gather_s32index_u32(__VA_ARGS__)
+#define svldff1_gather_s32index_f32(...) __builtin_sve_svldff1_gather_s32index_f32(__VA_ARGS__)
+#define svldff1_gather_s32index_s32(...) __builtin_sve_svldff1_gather_s32index_s32(__VA_ARGS__)
+#define svldff1_gather_u32index_u32(...) __builtin_sve_svldff1_gather_u32index_u32(__VA_ARGS__)
+#define svldff1_gather_u32index_f32(...) __builtin_sve_svldff1_gather_u32index_f32(__VA_ARGS__)
+#define svldff1_gather_u32index_s32(...) __builtin_sve_svldff1_gather_u32index_s32(__VA_ARGS__)
+#define svldff1_gather_s64index_u64(...) __builtin_sve_svldff1_gather_s64index_u64(__VA_ARGS__)
+#define svldff1_gather_s64index_f64(...) __builtin_sve_svldff1_gather_s64index_f64(__VA_ARGS__)
+#define svldff1_gather_s64index_s64(...) __builtin_sve_svldff1_gather_s64index_s64(__VA_ARGS__)
+#define svldff1_gather_u64index_u64(...) __builtin_sve_svldff1_gather_u64index_u64(__VA_ARGS__)
+#define svldff1_gather_u64index_f64(...) __builtin_sve_svldff1_gather_u64index_f64(__VA_ARGS__)
+#define svldff1_gather_u64index_s64(...) __builtin_sve_svldff1_gather_u64index_s64(__VA_ARGS__)
+#define svldff1_gather_s32offset_u32(...) __builtin_sve_svldff1_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1_gather_s32offset_f32(...) __builtin_sve_svldff1_gather_s32offset_f32(__VA_ARGS__)
+#define svldff1_gather_s32offset_s32(...) __builtin_sve_svldff1_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1_gather_u32offset_u32(...) __builtin_sve_svldff1_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1_gather_u32offset_f32(...) __builtin_sve_svldff1_gather_u32offset_f32(__VA_ARGS__)
+#define svldff1_gather_u32offset_s32(...) __builtin_sve_svldff1_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1_gather_s64offset_u64(...) __builtin_sve_svldff1_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1_gather_s64offset_f64(...) __builtin_sve_svldff1_gather_s64offset_f64(__VA_ARGS__)
+#define svldff1_gather_s64offset_s64(...) __builtin_sve_svldff1_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1_gather_u64offset_u64(...) __builtin_sve_svldff1_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1_gather_u64offset_f64(...) __builtin_sve_svldff1_gather_u64offset_f64(__VA_ARGS__)
+#define svldff1_gather_u64offset_s64(...) __builtin_sve_svldff1_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1_vnum_u8(...) __builtin_sve_svldff1_vnum_u8(__VA_ARGS__)
+#define svldff1_vnum_u32(...) __builtin_sve_svldff1_vnum_u32(__VA_ARGS__)
+#define svldff1_vnum_u64(...) __builtin_sve_svldff1_vnum_u64(__VA_ARGS__)
+#define svldff1_vnum_u16(...) __builtin_sve_svldff1_vnum_u16(__VA_ARGS__)
+#define svldff1_vnum_s8(...) __builtin_sve_svldff1_vnum_s8(__VA_ARGS__)
+#define svldff1_vnum_f64(...) __builtin_sve_svldff1_vnum_f64(__VA_ARGS__)
+#define svldff1_vnum_f32(...) __builtin_sve_svldff1_vnum_f32(__VA_ARGS__)
+#define svldff1_vnum_f16(...) __builtin_sve_svldff1_vnum_f16(__VA_ARGS__)
+#define svldff1_vnum_s32(...) __builtin_sve_svldff1_vnum_s32(__VA_ARGS__)
+#define svldff1_vnum_s64(...) __builtin_sve_svldff1_vnum_s64(__VA_ARGS__)
+#define svldff1_vnum_s16(...) __builtin_sve_svldff1_vnum_s16(__VA_ARGS__)
+#define svldff1sb_gather_u32base_offset_u32(...) __builtin_sve_svldff1sb_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_offset_u64(...) __builtin_sve_svldff1sb_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1sb_gather_u32base_offset_s32(...) __builtin_sve_svldff1sb_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_offset_s64(...) __builtin_sve_svldff1sb_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1sb_gather_u32base_u32(...) __builtin_sve_svldff1sb_gather_u32base_u32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_u64(...) __builtin_sve_svldff1sb_gather_u64base_u64(__VA_ARGS__)
+#define svldff1sb_gather_u32base_s32(...) __builtin_sve_svldff1sb_gather_u32base_s32(__VA_ARGS__)
+#define svldff1sb_gather_u64base_s64(...) __builtin_sve_svldff1sb_gather_u64base_s64(__VA_ARGS__)
+#define svldff1sb_gather_s32offset_u32(...) __builtin_sve_svldff1sb_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1sb_gather_s32offset_s32(...) __builtin_sve_svldff1sb_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1sb_gather_u32offset_u32(...) __builtin_sve_svldff1sb_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1sb_gather_u32offset_s32(...) __builtin_sve_svldff1sb_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1sb_gather_s64offset_u64(...) __builtin_sve_svldff1sb_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1sb_gather_s64offset_s64(...) __builtin_sve_svldff1sb_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1sb_gather_u64offset_u64(...) __builtin_sve_svldff1sb_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1sb_gather_u64offset_s64(...) __builtin_sve_svldff1sb_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1sb_vnum_u32(...) __builtin_sve_svldff1sb_vnum_u32(__VA_ARGS__)
+#define svldff1sb_vnum_u64(...) __builtin_sve_svldff1sb_vnum_u64(__VA_ARGS__)
+#define svldff1sb_vnum_u16(...) __builtin_sve_svldff1sb_vnum_u16(__VA_ARGS__)
+#define svldff1sb_vnum_s32(...) __builtin_sve_svldff1sb_vnum_s32(__VA_ARGS__)
+#define svldff1sb_vnum_s64(...) __builtin_sve_svldff1sb_vnum_s64(__VA_ARGS__)
+#define svldff1sb_vnum_s16(...) __builtin_sve_svldff1sb_vnum_s16(__VA_ARGS__)
+#define svldff1sb_u32(...) __builtin_sve_svldff1sb_u32(__VA_ARGS__)
+#define svldff1sb_u64(...) __builtin_sve_svldff1sb_u64(__VA_ARGS__)
+#define svldff1sb_u16(...) __builtin_sve_svldff1sb_u16(__VA_ARGS__)
+#define svldff1sb_s32(...) __builtin_sve_svldff1sb_s32(__VA_ARGS__)
+#define svldff1sb_s64(...) __builtin_sve_svldff1sb_s64(__VA_ARGS__)
+#define svldff1sb_s16(...) __builtin_sve_svldff1sb_s16(__VA_ARGS__)
+#define svldff1sh_gather_u32base_index_u32(...) __builtin_sve_svldff1sh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_index_u64(...) __builtin_sve_svldff1sh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_index_s32(...) __builtin_sve_svldff1sh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_index_s64(...) __builtin_sve_svldff1sh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_offset_u32(...) __builtin_sve_svldff1sh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_offset_u64(...) __builtin_sve_svldff1sh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_offset_s32(...) __builtin_sve_svldff1sh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_offset_s64(...) __builtin_sve_svldff1sh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_u32(...) __builtin_sve_svldff1sh_gather_u32base_u32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_u64(...) __builtin_sve_svldff1sh_gather_u64base_u64(__VA_ARGS__)
+#define svldff1sh_gather_u32base_s32(...) __builtin_sve_svldff1sh_gather_u32base_s32(__VA_ARGS__)
+#define svldff1sh_gather_u64base_s64(...) __builtin_sve_svldff1sh_gather_u64base_s64(__VA_ARGS__)
+#define svldff1sh_gather_s32index_u32(...) __builtin_sve_svldff1sh_gather_s32index_u32(__VA_ARGS__)
+#define svldff1sh_gather_s32index_s32(...) __builtin_sve_svldff1sh_gather_s32index_s32(__VA_ARGS__)
+#define svldff1sh_gather_u32index_u32(...) __builtin_sve_svldff1sh_gather_u32index_u32(__VA_ARGS__)
+#define svldff1sh_gather_u32index_s32(...) __builtin_sve_svldff1sh_gather_u32index_s32(__VA_ARGS__)
+#define svldff1sh_gather_s64index_u64(...) __builtin_sve_svldff1sh_gather_s64index_u64(__VA_ARGS__)
+#define svldff1sh_gather_s64index_s64(...) __builtin_sve_svldff1sh_gather_s64index_s64(__VA_ARGS__)
+#define svldff1sh_gather_u64index_u64(...) __builtin_sve_svldff1sh_gather_u64index_u64(__VA_ARGS__)
+#define svldff1sh_gather_u64index_s64(...) __builtin_sve_svldff1sh_gather_u64index_s64(__VA_ARGS__)
+#define svldff1sh_gather_s32offset_u32(...) __builtin_sve_svldff1sh_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1sh_gather_s32offset_s32(...) __builtin_sve_svldff1sh_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1sh_gather_u32offset_u32(...) __builtin_sve_svldff1sh_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1sh_gather_u32offset_s32(...) __builtin_sve_svldff1sh_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1sh_gather_s64offset_u64(...) __builtin_sve_svldff1sh_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1sh_gather_s64offset_s64(...) __builtin_sve_svldff1sh_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1sh_gather_u64offset_u64(...) __builtin_sve_svldff1sh_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1sh_gather_u64offset_s64(...) __builtin_sve_svldff1sh_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1sh_vnum_u32(...) __builtin_sve_svldff1sh_vnum_u32(__VA_ARGS__)
+#define svldff1sh_vnum_u64(...) __builtin_sve_svldff1sh_vnum_u64(__VA_ARGS__)
+#define svldff1sh_vnum_s32(...) __builtin_sve_svldff1sh_vnum_s32(__VA_ARGS__)
+#define svldff1sh_vnum_s64(...) __builtin_sve_svldff1sh_vnum_s64(__VA_ARGS__)
+#define svldff1sh_u32(...) __builtin_sve_svldff1sh_u32(__VA_ARGS__)
+#define svldff1sh_u64(...) __builtin_sve_svldff1sh_u64(__VA_ARGS__)
+#define svldff1sh_s32(...) __builtin_sve_svldff1sh_s32(__VA_ARGS__)
+#define svldff1sh_s64(...) __builtin_sve_svldff1sh_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_index_u64(...) __builtin_sve_svldff1sw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_index_s64(...) __builtin_sve_svldff1sw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_offset_u64(...) __builtin_sve_svldff1sw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_offset_s64(...) __builtin_sve_svldff1sw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_u64(...) __builtin_sve_svldff1sw_gather_u64base_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64base_s64(...) __builtin_sve_svldff1sw_gather_u64base_s64(__VA_ARGS__)
+#define svldff1sw_gather_s64index_u64(...) __builtin_sve_svldff1sw_gather_s64index_u64(__VA_ARGS__)
+#define svldff1sw_gather_s64index_s64(...) __builtin_sve_svldff1sw_gather_s64index_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64index_u64(...) __builtin_sve_svldff1sw_gather_u64index_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64index_s64(...) __builtin_sve_svldff1sw_gather_u64index_s64(__VA_ARGS__)
+#define svldff1sw_gather_s64offset_u64(...) __builtin_sve_svldff1sw_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1sw_gather_s64offset_s64(...) __builtin_sve_svldff1sw_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1sw_gather_u64offset_u64(...) __builtin_sve_svldff1sw_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1sw_gather_u64offset_s64(...) __builtin_sve_svldff1sw_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1sw_vnum_u64(...) __builtin_sve_svldff1sw_vnum_u64(__VA_ARGS__)
+#define svldff1sw_vnum_s64(...) __builtin_sve_svldff1sw_vnum_s64(__VA_ARGS__)
+#define svldff1sw_u64(...) __builtin_sve_svldff1sw_u64(__VA_ARGS__)
+#define svldff1sw_s64(...) __builtin_sve_svldff1sw_s64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_offset_u32(...) __builtin_sve_svldff1ub_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_offset_u64(...) __builtin_sve_svldff1ub_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_offset_s32(...) __builtin_sve_svldff1ub_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_offset_s64(...) __builtin_sve_svldff1ub_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_u32(...) __builtin_sve_svldff1ub_gather_u32base_u32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_u64(...) __builtin_sve_svldff1ub_gather_u64base_u64(__VA_ARGS__)
+#define svldff1ub_gather_u32base_s32(...) __builtin_sve_svldff1ub_gather_u32base_s32(__VA_ARGS__)
+#define svldff1ub_gather_u64base_s64(...) __builtin_sve_svldff1ub_gather_u64base_s64(__VA_ARGS__)
+#define svldff1ub_gather_s32offset_u32(...) __builtin_sve_svldff1ub_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1ub_gather_s32offset_s32(...) __builtin_sve_svldff1ub_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1ub_gather_u32offset_u32(...) __builtin_sve_svldff1ub_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1ub_gather_u32offset_s32(...) __builtin_sve_svldff1ub_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1ub_gather_s64offset_u64(...) __builtin_sve_svldff1ub_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1ub_gather_s64offset_s64(...) __builtin_sve_svldff1ub_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1ub_gather_u64offset_u64(...) __builtin_sve_svldff1ub_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1ub_gather_u64offset_s64(...) __builtin_sve_svldff1ub_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1ub_vnum_u32(...) __builtin_sve_svldff1ub_vnum_u32(__VA_ARGS__)
+#define svldff1ub_vnum_u64(...) __builtin_sve_svldff1ub_vnum_u64(__VA_ARGS__)
+#define svldff1ub_vnum_u16(...) __builtin_sve_svldff1ub_vnum_u16(__VA_ARGS__)
+#define svldff1ub_vnum_s32(...) __builtin_sve_svldff1ub_vnum_s32(__VA_ARGS__)
+#define svldff1ub_vnum_s64(...) __builtin_sve_svldff1ub_vnum_s64(__VA_ARGS__)
+#define svldff1ub_vnum_s16(...) __builtin_sve_svldff1ub_vnum_s16(__VA_ARGS__)
+#define svldff1ub_u32(...) __builtin_sve_svldff1ub_u32(__VA_ARGS__)
+#define svldff1ub_u64(...) __builtin_sve_svldff1ub_u64(__VA_ARGS__)
+#define svldff1ub_u16(...) __builtin_sve_svldff1ub_u16(__VA_ARGS__)
+#define svldff1ub_s32(...) __builtin_sve_svldff1ub_s32(__VA_ARGS__)
+#define svldff1ub_s64(...) __builtin_sve_svldff1ub_s64(__VA_ARGS__)
+#define svldff1ub_s16(...) __builtin_sve_svldff1ub_s16(__VA_ARGS__)
+#define svldff1uh_gather_u32base_index_u32(...) __builtin_sve_svldff1uh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_index_u64(...) __builtin_sve_svldff1uh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_index_s32(...) __builtin_sve_svldff1uh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_index_s64(...) __builtin_sve_svldff1uh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_offset_u32(...) __builtin_sve_svldff1uh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_offset_u64(...) __builtin_sve_svldff1uh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_offset_s32(...) __builtin_sve_svldff1uh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_offset_s64(...) __builtin_sve_svldff1uh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_u32(...) __builtin_sve_svldff1uh_gather_u32base_u32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_u64(...) __builtin_sve_svldff1uh_gather_u64base_u64(__VA_ARGS__)
+#define svldff1uh_gather_u32base_s32(...) __builtin_sve_svldff1uh_gather_u32base_s32(__VA_ARGS__)
+#define svldff1uh_gather_u64base_s64(...) __builtin_sve_svldff1uh_gather_u64base_s64(__VA_ARGS__)
+#define svldff1uh_gather_s32index_u32(...) __builtin_sve_svldff1uh_gather_s32index_u32(__VA_ARGS__)
+#define svldff1uh_gather_s32index_s32(...) __builtin_sve_svldff1uh_gather_s32index_s32(__VA_ARGS__)
+#define svldff1uh_gather_u32index_u32(...) __builtin_sve_svldff1uh_gather_u32index_u32(__VA_ARGS__)
+#define svldff1uh_gather_u32index_s32(...) __builtin_sve_svldff1uh_gather_u32index_s32(__VA_ARGS__)
+#define svldff1uh_gather_s64index_u64(...) __builtin_sve_svldff1uh_gather_s64index_u64(__VA_ARGS__)
+#define svldff1uh_gather_s64index_s64(...) __builtin_sve_svldff1uh_gather_s64index_s64(__VA_ARGS__)
+#define svldff1uh_gather_u64index_u64(...) __builtin_sve_svldff1uh_gather_u64index_u64(__VA_ARGS__)
+#define svldff1uh_gather_u64index_s64(...) __builtin_sve_svldff1uh_gather_u64index_s64(__VA_ARGS__)
+#define svldff1uh_gather_s32offset_u32(...) __builtin_sve_svldff1uh_gather_s32offset_u32(__VA_ARGS__)
+#define svldff1uh_gather_s32offset_s32(...) __builtin_sve_svldff1uh_gather_s32offset_s32(__VA_ARGS__)
+#define svldff1uh_gather_u32offset_u32(...) __builtin_sve_svldff1uh_gather_u32offset_u32(__VA_ARGS__)
+#define svldff1uh_gather_u32offset_s32(...) __builtin_sve_svldff1uh_gather_u32offset_s32(__VA_ARGS__)
+#define svldff1uh_gather_s64offset_u64(...) __builtin_sve_svldff1uh_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1uh_gather_s64offset_s64(...) __builtin_sve_svldff1uh_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1uh_gather_u64offset_u64(...) __builtin_sve_svldff1uh_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1uh_gather_u64offset_s64(...) __builtin_sve_svldff1uh_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1uh_vnum_u32(...) __builtin_sve_svldff1uh_vnum_u32(__VA_ARGS__)
+#define svldff1uh_vnum_u64(...) __builtin_sve_svldff1uh_vnum_u64(__VA_ARGS__)
+#define svldff1uh_vnum_s32(...) __builtin_sve_svldff1uh_vnum_s32(__VA_ARGS__)
+#define svldff1uh_vnum_s64(...) __builtin_sve_svldff1uh_vnum_s64(__VA_ARGS__)
+#define svldff1uh_u32(...) __builtin_sve_svldff1uh_u32(__VA_ARGS__)
+#define svldff1uh_u64(...) __builtin_sve_svldff1uh_u64(__VA_ARGS__)
+#define svldff1uh_s32(...) __builtin_sve_svldff1uh_s32(__VA_ARGS__)
+#define svldff1uh_s64(...) __builtin_sve_svldff1uh_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_index_u64(...) __builtin_sve_svldff1uw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_index_s64(...) __builtin_sve_svldff1uw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_offset_u64(...) __builtin_sve_svldff1uw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_offset_s64(...) __builtin_sve_svldff1uw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_u64(...) __builtin_sve_svldff1uw_gather_u64base_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64base_s64(...) __builtin_sve_svldff1uw_gather_u64base_s64(__VA_ARGS__)
+#define svldff1uw_gather_s64index_u64(...) __builtin_sve_svldff1uw_gather_s64index_u64(__VA_ARGS__)
+#define svldff1uw_gather_s64index_s64(...) __builtin_sve_svldff1uw_gather_s64index_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64index_u64(...) __builtin_sve_svldff1uw_gather_u64index_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64index_s64(...) __builtin_sve_svldff1uw_gather_u64index_s64(__VA_ARGS__)
+#define svldff1uw_gather_s64offset_u64(...) __builtin_sve_svldff1uw_gather_s64offset_u64(__VA_ARGS__)
+#define svldff1uw_gather_s64offset_s64(...) __builtin_sve_svldff1uw_gather_s64offset_s64(__VA_ARGS__)
+#define svldff1uw_gather_u64offset_u64(...) __builtin_sve_svldff1uw_gather_u64offset_u64(__VA_ARGS__)
+#define svldff1uw_gather_u64offset_s64(...) __builtin_sve_svldff1uw_gather_u64offset_s64(__VA_ARGS__)
+#define svldff1uw_vnum_u64(...) __builtin_sve_svldff1uw_vnum_u64(__VA_ARGS__)
+#define svldff1uw_vnum_s64(...) __builtin_sve_svldff1uw_vnum_s64(__VA_ARGS__)
+#define svldff1uw_u64(...) __builtin_sve_svldff1uw_u64(__VA_ARGS__)
+#define svldff1uw_s64(...) __builtin_sve_svldff1uw_s64(__VA_ARGS__)
+#define svldnf1_u8(...) __builtin_sve_svldnf1_u8(__VA_ARGS__)
+#define svldnf1_u32(...) __builtin_sve_svldnf1_u32(__VA_ARGS__)
+#define svldnf1_u64(...) __builtin_sve_svldnf1_u64(__VA_ARGS__)
+#define svldnf1_u16(...) __builtin_sve_svldnf1_u16(__VA_ARGS__)
+#define svldnf1_s8(...) __builtin_sve_svldnf1_s8(__VA_ARGS__)
+#define svldnf1_f64(...) __builtin_sve_svldnf1_f64(__VA_ARGS__)
+#define svldnf1_f32(...) __builtin_sve_svldnf1_f32(__VA_ARGS__)
+#define svldnf1_f16(...) __builtin_sve_svldnf1_f16(__VA_ARGS__)
+#define svldnf1_s32(...) __builtin_sve_svldnf1_s32(__VA_ARGS__)
+#define svldnf1_s64(...) __builtin_sve_svldnf1_s64(__VA_ARGS__)
+#define svldnf1_s16(...) __builtin_sve_svldnf1_s16(__VA_ARGS__)
+#define svldnf1_vnum_u8(...) __builtin_sve_svldnf1_vnum_u8(__VA_ARGS__)
+#define svldnf1_vnum_u32(...) __builtin_sve_svldnf1_vnum_u32(__VA_ARGS__)
+#define svldnf1_vnum_u64(...) __builtin_sve_svldnf1_vnum_u64(__VA_ARGS__)
+#define svldnf1_vnum_u16(...) __builtin_sve_svldnf1_vnum_u16(__VA_ARGS__)
+#define svldnf1_vnum_s8(...) __builtin_sve_svldnf1_vnum_s8(__VA_ARGS__)
+#define svldnf1_vnum_f64(...) __builtin_sve_svldnf1_vnum_f64(__VA_ARGS__)
+#define svldnf1_vnum_f32(...) __builtin_sve_svldnf1_vnum_f32(__VA_ARGS__)
+#define svldnf1_vnum_f16(...) __builtin_sve_svldnf1_vnum_f16(__VA_ARGS__)
+#define svldnf1_vnum_s32(...) __builtin_sve_svldnf1_vnum_s32(__VA_ARGS__)
+#define svldnf1_vnum_s64(...) __builtin_sve_svldnf1_vnum_s64(__VA_ARGS__)
+#define svldnf1_vnum_s16(...) __builtin_sve_svldnf1_vnum_s16(__VA_ARGS__)
+#define svldnf1sb_vnum_u32(...) __builtin_sve_svldnf1sb_vnum_u32(__VA_ARGS__)
+#define svldnf1sb_vnum_u64(...) __builtin_sve_svldnf1sb_vnum_u64(__VA_ARGS__)
+#define svldnf1sb_vnum_u16(...) __builtin_sve_svldnf1sb_vnum_u16(__VA_ARGS__)
+#define svldnf1sb_vnum_s32(...) __builtin_sve_svldnf1sb_vnum_s32(__VA_ARGS__)
+#define svldnf1sb_vnum_s64(...) __builtin_sve_svldnf1sb_vnum_s64(__VA_ARGS__)
+#define svldnf1sb_vnum_s16(...) __builtin_sve_svldnf1sb_vnum_s16(__VA_ARGS__)
+#define svldnf1sb_u32(...) __builtin_sve_svldnf1sb_u32(__VA_ARGS__)
+#define svldnf1sb_u64(...) __builtin_sve_svldnf1sb_u64(__VA_ARGS__)
+#define svldnf1sb_u16(...) __builtin_sve_svldnf1sb_u16(__VA_ARGS__)
+#define svldnf1sb_s32(...) __builtin_sve_svldnf1sb_s32(__VA_ARGS__)
+#define svldnf1sb_s64(...) __builtin_sve_svldnf1sb_s64(__VA_ARGS__)
+#define svldnf1sb_s16(...) __builtin_sve_svldnf1sb_s16(__VA_ARGS__)
+#define svldnf1sh_vnum_u32(...) __builtin_sve_svldnf1sh_vnum_u32(__VA_ARGS__)
+#define svldnf1sh_vnum_u64(...) __builtin_sve_svldnf1sh_vnum_u64(__VA_ARGS__)
+#define svldnf1sh_vnum_s32(...) __builtin_sve_svldnf1sh_vnum_s32(__VA_ARGS__)
+#define svldnf1sh_vnum_s64(...) __builtin_sve_svldnf1sh_vnum_s64(__VA_ARGS__)
+#define svldnf1sh_u32(...) __builtin_sve_svldnf1sh_u32(__VA_ARGS__)
+#define svldnf1sh_u64(...) __builtin_sve_svldnf1sh_u64(__VA_ARGS__)
+#define svldnf1sh_s32(...) __builtin_sve_svldnf1sh_s32(__VA_ARGS__)
+#define svldnf1sh_s64(...) __builtin_sve_svldnf1sh_s64(__VA_ARGS__)
+#define svldnf1sw_vnum_u64(...) __builtin_sve_svldnf1sw_vnum_u64(__VA_ARGS__)
+#define svldnf1sw_vnum_s64(...) __builtin_sve_svldnf1sw_vnum_s64(__VA_ARGS__)
+#define svldnf1sw_u64(...) __builtin_sve_svldnf1sw_u64(__VA_ARGS__)
+#define svldnf1sw_s64(...) __builtin_sve_svldnf1sw_s64(__VA_ARGS__)
+#define svldnf1ub_vnum_u32(...) __builtin_sve_svldnf1ub_vnum_u32(__VA_ARGS__)
+#define svldnf1ub_vnum_u64(...) __builtin_sve_svldnf1ub_vnum_u64(__VA_ARGS__)
+#define svldnf1ub_vnum_u16(...) __builtin_sve_svldnf1ub_vnum_u16(__VA_ARGS__)
+#define svldnf1ub_vnum_s32(...) __builtin_sve_svldnf1ub_vnum_s32(__VA_ARGS__)
+#define svldnf1ub_vnum_s64(...) __builtin_sve_svldnf1ub_vnum_s64(__VA_ARGS__)
+#define svldnf1ub_vnum_s16(...) __builtin_sve_svldnf1ub_vnum_s16(__VA_ARGS__)
+#define svldnf1ub_u32(...) __builtin_sve_svldnf1ub_u32(__VA_ARGS__)
+#define svldnf1ub_u64(...) __builtin_sve_svldnf1ub_u64(__VA_ARGS__)
+#define svldnf1ub_u16(...) __builtin_sve_svldnf1ub_u16(__VA_ARGS__)
+#define svldnf1ub_s32(...) __builtin_sve_svldnf1ub_s32(__VA_ARGS__)
+#define svldnf1ub_s64(...) __builtin_sve_svldnf1ub_s64(__VA_ARGS__)
+#define svldnf1ub_s16(...) __builtin_sve_svldnf1ub_s16(__VA_ARGS__)
+#define svldnf1uh_vnum_u32(...) __builtin_sve_svldnf1uh_vnum_u32(__VA_ARGS__)
+#define svldnf1uh_vnum_u64(...) __builtin_sve_svldnf1uh_vnum_u64(__VA_ARGS__)
+#define svldnf1uh_vnum_s32(...) __builtin_sve_svldnf1uh_vnum_s32(__VA_ARGS__)
+#define svldnf1uh_vnum_s64(...) __builtin_sve_svldnf1uh_vnum_s64(__VA_ARGS__)
+#define svldnf1uh_u32(...) __builtin_sve_svldnf1uh_u32(__VA_ARGS__)
+#define svldnf1uh_u64(...) __builtin_sve_svldnf1uh_u64(__VA_ARGS__)
+#define svldnf1uh_s32(...) __builtin_sve_svldnf1uh_s32(__VA_ARGS__)
+#define svldnf1uh_s64(...) __builtin_sve_svldnf1uh_s64(__VA_ARGS__)
+#define svldnf1uw_vnum_u64(...) __builtin_sve_svldnf1uw_vnum_u64(__VA_ARGS__)
+#define svldnf1uw_vnum_s64(...) __builtin_sve_svldnf1uw_vnum_s64(__VA_ARGS__)
+#define svldnf1uw_u64(...) __builtin_sve_svldnf1uw_u64(__VA_ARGS__)
+#define svldnf1uw_s64(...) __builtin_sve_svldnf1uw_s64(__VA_ARGS__)
+#define svldnt1_u8(...) __builtin_sve_svldnt1_u8(__VA_ARGS__)
+#define svldnt1_u32(...) __builtin_sve_svldnt1_u32(__VA_ARGS__)
+#define svldnt1_u64(...) __builtin_sve_svldnt1_u64(__VA_ARGS__)
+#define svldnt1_u16(...) __builtin_sve_svldnt1_u16(__VA_ARGS__)
+#define svldnt1_s8(...) __builtin_sve_svldnt1_s8(__VA_ARGS__)
+#define svldnt1_f64(...) __builtin_sve_svldnt1_f64(__VA_ARGS__)
+#define svldnt1_f32(...) __builtin_sve_svldnt1_f32(__VA_ARGS__)
+#define svldnt1_f16(...) __builtin_sve_svldnt1_f16(__VA_ARGS__)
+#define svldnt1_s32(...) __builtin_sve_svldnt1_s32(__VA_ARGS__)
+#define svldnt1_s64(...) __builtin_sve_svldnt1_s64(__VA_ARGS__)
+#define svldnt1_s16(...) __builtin_sve_svldnt1_s16(__VA_ARGS__)
+#define svldnt1_vnum_u8(...) __builtin_sve_svldnt1_vnum_u8(__VA_ARGS__)
+#define svldnt1_vnum_u32(...) __builtin_sve_svldnt1_vnum_u32(__VA_ARGS__)
+#define svldnt1_vnum_u64(...) __builtin_sve_svldnt1_vnum_u64(__VA_ARGS__)
+#define svldnt1_vnum_u16(...) __builtin_sve_svldnt1_vnum_u16(__VA_ARGS__)
+#define svldnt1_vnum_s8(...) __builtin_sve_svldnt1_vnum_s8(__VA_ARGS__)
+#define svldnt1_vnum_f64(...) __builtin_sve_svldnt1_vnum_f64(__VA_ARGS__)
+#define svldnt1_vnum_f32(...) __builtin_sve_svldnt1_vnum_f32(__VA_ARGS__)
+#define svldnt1_vnum_f16(...) __builtin_sve_svldnt1_vnum_f16(__VA_ARGS__)
+#define svldnt1_vnum_s32(...) __builtin_sve_svldnt1_vnum_s32(__VA_ARGS__)
+#define svldnt1_vnum_s64(...) __builtin_sve_svldnt1_vnum_s64(__VA_ARGS__)
+#define svldnt1_vnum_s16(...) __builtin_sve_svldnt1_vnum_s16(__VA_ARGS__)
+#define svlen_u8(...) __builtin_sve_svlen_u8(__VA_ARGS__)
+#define svlen_u32(...) __builtin_sve_svlen_u32(__VA_ARGS__)
+#define svlen_u64(...) __builtin_sve_svlen_u64(__VA_ARGS__)
+#define svlen_u16(...) __builtin_sve_svlen_u16(__VA_ARGS__)
+#define svlen_s8(...) __builtin_sve_svlen_s8(__VA_ARGS__)
+#define svlen_f64(...) __builtin_sve_svlen_f64(__VA_ARGS__)
+#define svlen_f32(...) __builtin_sve_svlen_f32(__VA_ARGS__)
+#define svlen_f16(...) __builtin_sve_svlen_f16(__VA_ARGS__)
+#define svlen_s32(...) __builtin_sve_svlen_s32(__VA_ARGS__)
+#define svlen_s64(...) __builtin_sve_svlen_s64(__VA_ARGS__)
+#define svlen_s16(...) __builtin_sve_svlen_s16(__VA_ARGS__)
+#define svlsl_n_u8_m(...) __builtin_sve_svlsl_n_u8_m(__VA_ARGS__)
+#define svlsl_n_u32_m(...) __builtin_sve_svlsl_n_u32_m(__VA_ARGS__)
+#define svlsl_n_u64_m(...) __builtin_sve_svlsl_n_u64_m(__VA_ARGS__)
+#define svlsl_n_u16_m(...) __builtin_sve_svlsl_n_u16_m(__VA_ARGS__)
+#define svlsl_n_s8_m(...) __builtin_sve_svlsl_n_s8_m(__VA_ARGS__)
+#define svlsl_n_s32_m(...) __builtin_sve_svlsl_n_s32_m(__VA_ARGS__)
+#define svlsl_n_s64_m(...) __builtin_sve_svlsl_n_s64_m(__VA_ARGS__)
+#define svlsl_n_s16_m(...) __builtin_sve_svlsl_n_s16_m(__VA_ARGS__)
+#define svlsl_n_u8_x(...) __builtin_sve_svlsl_n_u8_x(__VA_ARGS__)
+#define svlsl_n_u32_x(...) __builtin_sve_svlsl_n_u32_x(__VA_ARGS__)
+#define svlsl_n_u64_x(...) __builtin_sve_svlsl_n_u64_x(__VA_ARGS__)
+#define svlsl_n_u16_x(...) __builtin_sve_svlsl_n_u16_x(__VA_ARGS__)
+#define svlsl_n_s8_x(...) __builtin_sve_svlsl_n_s8_x(__VA_ARGS__)
+#define svlsl_n_s32_x(...) __builtin_sve_svlsl_n_s32_x(__VA_ARGS__)
+#define svlsl_n_s64_x(...) __builtin_sve_svlsl_n_s64_x(__VA_ARGS__)
+#define svlsl_n_s16_x(...) __builtin_sve_svlsl_n_s16_x(__VA_ARGS__)
+#define svlsl_n_u8_z(...) __builtin_sve_svlsl_n_u8_z(__VA_ARGS__)
+#define svlsl_n_u32_z(...) __builtin_sve_svlsl_n_u32_z(__VA_ARGS__)
+#define svlsl_n_u64_z(...) __builtin_sve_svlsl_n_u64_z(__VA_ARGS__)
+#define svlsl_n_u16_z(...) __builtin_sve_svlsl_n_u16_z(__VA_ARGS__)
+#define svlsl_n_s8_z(...) __builtin_sve_svlsl_n_s8_z(__VA_ARGS__)
+#define svlsl_n_s32_z(...) __builtin_sve_svlsl_n_s32_z(__VA_ARGS__)
+#define svlsl_n_s64_z(...) __builtin_sve_svlsl_n_s64_z(__VA_ARGS__)
+#define svlsl_n_s16_z(...) __builtin_sve_svlsl_n_s16_z(__VA_ARGS__)
+#define svlsl_u8_m(...) __builtin_sve_svlsl_u8_m(__VA_ARGS__)
+#define svlsl_u32_m(...) __builtin_sve_svlsl_u32_m(__VA_ARGS__)
+#define svlsl_u64_m(...) __builtin_sve_svlsl_u64_m(__VA_ARGS__)
+#define svlsl_u16_m(...) __builtin_sve_svlsl_u16_m(__VA_ARGS__)
+#define svlsl_s8_m(...) __builtin_sve_svlsl_s8_m(__VA_ARGS__)
+#define svlsl_s32_m(...) __builtin_sve_svlsl_s32_m(__VA_ARGS__)
+#define svlsl_s64_m(...) __builtin_sve_svlsl_s64_m(__VA_ARGS__)
+#define svlsl_s16_m(...) __builtin_sve_svlsl_s16_m(__VA_ARGS__)
+#define svlsl_u8_x(...) __builtin_sve_svlsl_u8_x(__VA_ARGS__)
+#define svlsl_u32_x(...) __builtin_sve_svlsl_u32_x(__VA_ARGS__)
+#define svlsl_u64_x(...) __builtin_sve_svlsl_u64_x(__VA_ARGS__)
+#define svlsl_u16_x(...) __builtin_sve_svlsl_u16_x(__VA_ARGS__)
+#define svlsl_s8_x(...) __builtin_sve_svlsl_s8_x(__VA_ARGS__)
+#define svlsl_s32_x(...) __builtin_sve_svlsl_s32_x(__VA_ARGS__)
+#define svlsl_s64_x(...) __builtin_sve_svlsl_s64_x(__VA_ARGS__)
+#define svlsl_s16_x(...) __builtin_sve_svlsl_s16_x(__VA_ARGS__)
+#define svlsl_u8_z(...) __builtin_sve_svlsl_u8_z(__VA_ARGS__)
+#define svlsl_u32_z(...) __builtin_sve_svlsl_u32_z(__VA_ARGS__)
+#define svlsl_u64_z(...) __builtin_sve_svlsl_u64_z(__VA_ARGS__)
+#define svlsl_u16_z(...) __builtin_sve_svlsl_u16_z(__VA_ARGS__)
+#define svlsl_s8_z(...) __builtin_sve_svlsl_s8_z(__VA_ARGS__)
+#define svlsl_s32_z(...) __builtin_sve_svlsl_s32_z(__VA_ARGS__)
+#define svlsl_s64_z(...) __builtin_sve_svlsl_s64_z(__VA_ARGS__)
+#define svlsl_s16_z(...) __builtin_sve_svlsl_s16_z(__VA_ARGS__)
+#define svlsl_wide_n_u8_m(...) __builtin_sve_svlsl_wide_n_u8_m(__VA_ARGS__)
+#define svlsl_wide_n_u32_m(...) __builtin_sve_svlsl_wide_n_u32_m(__VA_ARGS__)
+#define svlsl_wide_n_u16_m(...) __builtin_sve_svlsl_wide_n_u16_m(__VA_ARGS__)
+#define svlsl_wide_n_s8_m(...) __builtin_sve_svlsl_wide_n_s8_m(__VA_ARGS__)
+#define svlsl_wide_n_s32_m(...) __builtin_sve_svlsl_wide_n_s32_m(__VA_ARGS__)
+#define svlsl_wide_n_s16_m(...) __builtin_sve_svlsl_wide_n_s16_m(__VA_ARGS__)
+#define svlsl_wide_n_u8_x(...) __builtin_sve_svlsl_wide_n_u8_x(__VA_ARGS__)
+#define svlsl_wide_n_u32_x(...) __builtin_sve_svlsl_wide_n_u32_x(__VA_ARGS__)
+#define svlsl_wide_n_u16_x(...) __builtin_sve_svlsl_wide_n_u16_x(__VA_ARGS__)
+#define svlsl_wide_n_s8_x(...) __builtin_sve_svlsl_wide_n_s8_x(__VA_ARGS__)
+#define svlsl_wide_n_s32_x(...) __builtin_sve_svlsl_wide_n_s32_x(__VA_ARGS__)
+#define svlsl_wide_n_s16_x(...) __builtin_sve_svlsl_wide_n_s16_x(__VA_ARGS__)
+#define svlsl_wide_n_u8_z(...) __builtin_sve_svlsl_wide_n_u8_z(__VA_ARGS__)
+#define svlsl_wide_n_u32_z(...) __builtin_sve_svlsl_wide_n_u32_z(__VA_ARGS__)
+#define svlsl_wide_n_u16_z(...) __builtin_sve_svlsl_wide_n_u16_z(__VA_ARGS__)
+#define svlsl_wide_n_s8_z(...) __builtin_sve_svlsl_wide_n_s8_z(__VA_ARGS__)
+#define svlsl_wide_n_s32_z(...) __builtin_sve_svlsl_wide_n_s32_z(__VA_ARGS__)
+#define svlsl_wide_n_s16_z(...) __builtin_sve_svlsl_wide_n_s16_z(__VA_ARGS__)
+#define svlsl_wide_u8_m(...) __builtin_sve_svlsl_wide_u8_m(__VA_ARGS__)
+#define svlsl_wide_u32_m(...) __builtin_sve_svlsl_wide_u32_m(__VA_ARGS__)
+#define svlsl_wide_u16_m(...) __builtin_sve_svlsl_wide_u16_m(__VA_ARGS__)
+#define svlsl_wide_s8_m(...) __builtin_sve_svlsl_wide_s8_m(__VA_ARGS__)
+#define svlsl_wide_s32_m(...) __builtin_sve_svlsl_wide_s32_m(__VA_ARGS__)
+#define svlsl_wide_s16_m(...) __builtin_sve_svlsl_wide_s16_m(__VA_ARGS__)
+#define svlsl_wide_u8_x(...) __builtin_sve_svlsl_wide_u8_x(__VA_ARGS__)
+#define svlsl_wide_u32_x(...) __builtin_sve_svlsl_wide_u32_x(__VA_ARGS__)
+#define svlsl_wide_u16_x(...) __builtin_sve_svlsl_wide_u16_x(__VA_ARGS__)
+#define svlsl_wide_s8_x(...) __builtin_sve_svlsl_wide_s8_x(__VA_ARGS__)
+#define svlsl_wide_s32_x(...) __builtin_sve_svlsl_wide_s32_x(__VA_ARGS__)
+#define svlsl_wide_s16_x(...) __builtin_sve_svlsl_wide_s16_x(__VA_ARGS__)
+#define svlsl_wide_u8_z(...) __builtin_sve_svlsl_wide_u8_z(__VA_ARGS__)
+#define svlsl_wide_u32_z(...) __builtin_sve_svlsl_wide_u32_z(__VA_ARGS__)
+#define svlsl_wide_u16_z(...) __builtin_sve_svlsl_wide_u16_z(__VA_ARGS__)
+#define svlsl_wide_s8_z(...) __builtin_sve_svlsl_wide_s8_z(__VA_ARGS__)
+#define svlsl_wide_s32_z(...) __builtin_sve_svlsl_wide_s32_z(__VA_ARGS__)
+#define svlsl_wide_s16_z(...) __builtin_sve_svlsl_wide_s16_z(__VA_ARGS__)
+#define svlsr_n_u8_m(...) __builtin_sve_svlsr_n_u8_m(__VA_ARGS__)
+#define svlsr_n_u32_m(...) __builtin_sve_svlsr_n_u32_m(__VA_ARGS__)
+#define svlsr_n_u64_m(...) __builtin_sve_svlsr_n_u64_m(__VA_ARGS__)
+#define svlsr_n_u16_m(...) __builtin_sve_svlsr_n_u16_m(__VA_ARGS__)
+#define svlsr_n_u8_x(...) __builtin_sve_svlsr_n_u8_x(__VA_ARGS__)
+#define svlsr_n_u32_x(...) __builtin_sve_svlsr_n_u32_x(__VA_ARGS__)
+#define svlsr_n_u64_x(...) __builtin_sve_svlsr_n_u64_x(__VA_ARGS__)
+#define svlsr_n_u16_x(...) __builtin_sve_svlsr_n_u16_x(__VA_ARGS__)
+#define svlsr_n_u8_z(...) __builtin_sve_svlsr_n_u8_z(__VA_ARGS__)
+#define svlsr_n_u32_z(...) __builtin_sve_svlsr_n_u32_z(__VA_ARGS__)
+#define svlsr_n_u64_z(...) __builtin_sve_svlsr_n_u64_z(__VA_ARGS__)
+#define svlsr_n_u16_z(...) __builtin_sve_svlsr_n_u16_z(__VA_ARGS__)
+#define svlsr_u8_m(...) __builtin_sve_svlsr_u8_m(__VA_ARGS__)
+#define svlsr_u32_m(...) __builtin_sve_svlsr_u32_m(__VA_ARGS__)
+#define svlsr_u64_m(...) __builtin_sve_svlsr_u64_m(__VA_ARGS__)
+#define svlsr_u16_m(...) __builtin_sve_svlsr_u16_m(__VA_ARGS__)
+#define svlsr_u8_x(...) __builtin_sve_svlsr_u8_x(__VA_ARGS__)
+#define svlsr_u32_x(...) __builtin_sve_svlsr_u32_x(__VA_ARGS__)
+#define svlsr_u64_x(...) __builtin_sve_svlsr_u64_x(__VA_ARGS__)
+#define svlsr_u16_x(...) __builtin_sve_svlsr_u16_x(__VA_ARGS__)
+#define svlsr_u8_z(...) __builtin_sve_svlsr_u8_z(__VA_ARGS__)
+#define svlsr_u32_z(...) __builtin_sve_svlsr_u32_z(__VA_ARGS__)
+#define svlsr_u64_z(...) __builtin_sve_svlsr_u64_z(__VA_ARGS__)
+#define svlsr_u16_z(...) __builtin_sve_svlsr_u16_z(__VA_ARGS__)
+#define svlsr_wide_n_u8_m(...) __builtin_sve_svlsr_wide_n_u8_m(__VA_ARGS__)
+#define svlsr_wide_n_u32_m(...) __builtin_sve_svlsr_wide_n_u32_m(__VA_ARGS__)
+#define svlsr_wide_n_u16_m(...) __builtin_sve_svlsr_wide_n_u16_m(__VA_ARGS__)
+#define svlsr_wide_n_u8_x(...) __builtin_sve_svlsr_wide_n_u8_x(__VA_ARGS__)
+#define svlsr_wide_n_u32_x(...) __builtin_sve_svlsr_wide_n_u32_x(__VA_ARGS__)
+#define svlsr_wide_n_u16_x(...) __builtin_sve_svlsr_wide_n_u16_x(__VA_ARGS__)
+#define svlsr_wide_n_u8_z(...) __builtin_sve_svlsr_wide_n_u8_z(__VA_ARGS__)
+#define svlsr_wide_n_u32_z(...) __builtin_sve_svlsr_wide_n_u32_z(__VA_ARGS__)
+#define svlsr_wide_n_u16_z(...) __builtin_sve_svlsr_wide_n_u16_z(__VA_ARGS__)
+#define svlsr_wide_u8_m(...) __builtin_sve_svlsr_wide_u8_m(__VA_ARGS__)
+#define svlsr_wide_u32_m(...) __builtin_sve_svlsr_wide_u32_m(__VA_ARGS__)
+#define svlsr_wide_u16_m(...) __builtin_sve_svlsr_wide_u16_m(__VA_ARGS__)
+#define svlsr_wide_u8_x(...) __builtin_sve_svlsr_wide_u8_x(__VA_ARGS__)
+#define svlsr_wide_u32_x(...) __builtin_sve_svlsr_wide_u32_x(__VA_ARGS__)
+#define svlsr_wide_u16_x(...) __builtin_sve_svlsr_wide_u16_x(__VA_ARGS__)
+#define svlsr_wide_u8_z(...) __builtin_sve_svlsr_wide_u8_z(__VA_ARGS__)
+#define svlsr_wide_u32_z(...) __builtin_sve_svlsr_wide_u32_z(__VA_ARGS__)
+#define svlsr_wide_u16_z(...) __builtin_sve_svlsr_wide_u16_z(__VA_ARGS__)
+#define svmad_n_f64_m(...) __builtin_sve_svmad_n_f64_m(__VA_ARGS__)
+#define svmad_n_f32_m(...) __builtin_sve_svmad_n_f32_m(__VA_ARGS__)
+#define svmad_n_f16_m(...) __builtin_sve_svmad_n_f16_m(__VA_ARGS__)
+#define svmad_n_f64_x(...) __builtin_sve_svmad_n_f64_x(__VA_ARGS__)
+#define svmad_n_f32_x(...) __builtin_sve_svmad_n_f32_x(__VA_ARGS__)
+#define svmad_n_f16_x(...) __builtin_sve_svmad_n_f16_x(__VA_ARGS__)
+#define svmad_n_f64_z(...) __builtin_sve_svmad_n_f64_z(__VA_ARGS__)
+#define svmad_n_f32_z(...) __builtin_sve_svmad_n_f32_z(__VA_ARGS__)
+#define svmad_n_f16_z(...) __builtin_sve_svmad_n_f16_z(__VA_ARGS__)
+#define svmad_n_u8_m(...) __builtin_sve_svmad_n_u8_m(__VA_ARGS__)
+#define svmad_n_u32_m(...) __builtin_sve_svmad_n_u32_m(__VA_ARGS__)
+#define svmad_n_u64_m(...) __builtin_sve_svmad_n_u64_m(__VA_ARGS__)
+#define svmad_n_u16_m(...) __builtin_sve_svmad_n_u16_m(__VA_ARGS__)
+#define svmad_n_s8_m(...) __builtin_sve_svmad_n_s8_m(__VA_ARGS__)
+#define svmad_n_s32_m(...) __builtin_sve_svmad_n_s32_m(__VA_ARGS__)
+#define svmad_n_s64_m(...) __builtin_sve_svmad_n_s64_m(__VA_ARGS__)
+#define svmad_n_s16_m(...) __builtin_sve_svmad_n_s16_m(__VA_ARGS__)
+#define svmad_n_u8_x(...) __builtin_sve_svmad_n_u8_x(__VA_ARGS__)
+#define svmad_n_u32_x(...) __builtin_sve_svmad_n_u32_x(__VA_ARGS__)
+#define svmad_n_u64_x(...) __builtin_sve_svmad_n_u64_x(__VA_ARGS__)
+#define svmad_n_u16_x(...) __builtin_sve_svmad_n_u16_x(__VA_ARGS__)
+#define svmad_n_s8_x(...) __builtin_sve_svmad_n_s8_x(__VA_ARGS__)
+#define svmad_n_s32_x(...) __builtin_sve_svmad_n_s32_x(__VA_ARGS__)
+#define svmad_n_s64_x(...) __builtin_sve_svmad_n_s64_x(__VA_ARGS__)
+#define svmad_n_s16_x(...) __builtin_sve_svmad_n_s16_x(__VA_ARGS__)
+#define svmad_n_u8_z(...) __builtin_sve_svmad_n_u8_z(__VA_ARGS__)
+#define svmad_n_u32_z(...) __builtin_sve_svmad_n_u32_z(__VA_ARGS__)
+#define svmad_n_u64_z(...) __builtin_sve_svmad_n_u64_z(__VA_ARGS__)
+#define svmad_n_u16_z(...) __builtin_sve_svmad_n_u16_z(__VA_ARGS__)
+#define svmad_n_s8_z(...) __builtin_sve_svmad_n_s8_z(__VA_ARGS__)
+#define svmad_n_s32_z(...) __builtin_sve_svmad_n_s32_z(__VA_ARGS__)
+#define svmad_n_s64_z(...) __builtin_sve_svmad_n_s64_z(__VA_ARGS__)
+#define svmad_n_s16_z(...) __builtin_sve_svmad_n_s16_z(__VA_ARGS__)
+#define svmad_f64_m(...) __builtin_sve_svmad_f64_m(__VA_ARGS__)
+#define svmad_f32_m(...) __builtin_sve_svmad_f32_m(__VA_ARGS__)
+#define svmad_f16_m(...) __builtin_sve_svmad_f16_m(__VA_ARGS__)
+#define svmad_f64_x(...) __builtin_sve_svmad_f64_x(__VA_ARGS__)
+#define svmad_f32_x(...) __builtin_sve_svmad_f32_x(__VA_ARGS__)
+#define svmad_f16_x(...) __builtin_sve_svmad_f16_x(__VA_ARGS__)
+#define svmad_f64_z(...) __builtin_sve_svmad_f64_z(__VA_ARGS__)
+#define svmad_f32_z(...) __builtin_sve_svmad_f32_z(__VA_ARGS__)
+#define svmad_f16_z(...) __builtin_sve_svmad_f16_z(__VA_ARGS__)
+#define svmad_u8_m(...) __builtin_sve_svmad_u8_m(__VA_ARGS__)
+#define svmad_u32_m(...) __builtin_sve_svmad_u32_m(__VA_ARGS__)
+#define svmad_u64_m(...) __builtin_sve_svmad_u64_m(__VA_ARGS__)
+#define svmad_u16_m(...) __builtin_sve_svmad_u16_m(__VA_ARGS__)
+#define svmad_s8_m(...) __builtin_sve_svmad_s8_m(__VA_ARGS__)
+#define svmad_s32_m(...) __builtin_sve_svmad_s32_m(__VA_ARGS__)
+#define svmad_s64_m(...) __builtin_sve_svmad_s64_m(__VA_ARGS__)
+#define svmad_s16_m(...) __builtin_sve_svmad_s16_m(__VA_ARGS__)
+#define svmad_u8_x(...) __builtin_sve_svmad_u8_x(__VA_ARGS__)
+#define svmad_u32_x(...) __builtin_sve_svmad_u32_x(__VA_ARGS__)
+#define svmad_u64_x(...) __builtin_sve_svmad_u64_x(__VA_ARGS__)
+#define svmad_u16_x(...) __builtin_sve_svmad_u16_x(__VA_ARGS__)
+#define svmad_s8_x(...) __builtin_sve_svmad_s8_x(__VA_ARGS__)
+#define svmad_s32_x(...) __builtin_sve_svmad_s32_x(__VA_ARGS__)
+#define svmad_s64_x(...) __builtin_sve_svmad_s64_x(__VA_ARGS__)
+#define svmad_s16_x(...) __builtin_sve_svmad_s16_x(__VA_ARGS__)
+#define svmad_u8_z(...) __builtin_sve_svmad_u8_z(__VA_ARGS__)
+#define svmad_u32_z(...) __builtin_sve_svmad_u32_z(__VA_ARGS__)
+#define svmad_u64_z(...) __builtin_sve_svmad_u64_z(__VA_ARGS__)
+#define svmad_u16_z(...) __builtin_sve_svmad_u16_z(__VA_ARGS__)
+#define svmad_s8_z(...) __builtin_sve_svmad_s8_z(__VA_ARGS__)
+#define svmad_s32_z(...) __builtin_sve_svmad_s32_z(__VA_ARGS__)
+#define svmad_s64_z(...) __builtin_sve_svmad_s64_z(__VA_ARGS__)
+#define svmad_s16_z(...) __builtin_sve_svmad_s16_z(__VA_ARGS__)
+#define svmax_n_f64_m(...) __builtin_sve_svmax_n_f64_m(__VA_ARGS__)
+#define svmax_n_f32_m(...) __builtin_sve_svmax_n_f32_m(__VA_ARGS__)
+#define svmax_n_f16_m(...) __builtin_sve_svmax_n_f16_m(__VA_ARGS__)
+#define svmax_n_f64_x(...) __builtin_sve_svmax_n_f64_x(__VA_ARGS__)
+#define svmax_n_f32_x(...) __builtin_sve_svmax_n_f32_x(__VA_ARGS__)
+#define svmax_n_f16_x(...) __builtin_sve_svmax_n_f16_x(__VA_ARGS__)
+#define svmax_n_f64_z(...) __builtin_sve_svmax_n_f64_z(__VA_ARGS__)
+#define svmax_n_f32_z(...) __builtin_sve_svmax_n_f32_z(__VA_ARGS__)
+#define svmax_n_f16_z(...) __builtin_sve_svmax_n_f16_z(__VA_ARGS__)
+#define svmax_n_s8_m(...) __builtin_sve_svmax_n_s8_m(__VA_ARGS__)
+#define svmax_n_s32_m(...) __builtin_sve_svmax_n_s32_m(__VA_ARGS__)
+#define svmax_n_s64_m(...) __builtin_sve_svmax_n_s64_m(__VA_ARGS__)
+#define svmax_n_s16_m(...) __builtin_sve_svmax_n_s16_m(__VA_ARGS__)
+#define svmax_n_s8_x(...) __builtin_sve_svmax_n_s8_x(__VA_ARGS__)
+#define svmax_n_s32_x(...) __builtin_sve_svmax_n_s32_x(__VA_ARGS__)
+#define svmax_n_s64_x(...) __builtin_sve_svmax_n_s64_x(__VA_ARGS__)
+#define svmax_n_s16_x(...) __builtin_sve_svmax_n_s16_x(__VA_ARGS__)
+#define svmax_n_s8_z(...) __builtin_sve_svmax_n_s8_z(__VA_ARGS__)
+#define svmax_n_s32_z(...) __builtin_sve_svmax_n_s32_z(__VA_ARGS__)
+#define svmax_n_s64_z(...) __builtin_sve_svmax_n_s64_z(__VA_ARGS__)
+#define svmax_n_s16_z(...) __builtin_sve_svmax_n_s16_z(__VA_ARGS__)
+#define svmax_n_u8_m(...) __builtin_sve_svmax_n_u8_m(__VA_ARGS__)
+#define svmax_n_u32_m(...) __builtin_sve_svmax_n_u32_m(__VA_ARGS__)
+#define svmax_n_u64_m(...) __builtin_sve_svmax_n_u64_m(__VA_ARGS__)
+#define svmax_n_u16_m(...) __builtin_sve_svmax_n_u16_m(__VA_ARGS__)
+#define svmax_n_u8_x(...) __builtin_sve_svmax_n_u8_x(__VA_ARGS__)
+#define svmax_n_u32_x(...) __builtin_sve_svmax_n_u32_x(__VA_ARGS__)
+#define svmax_n_u64_x(...) __builtin_sve_svmax_n_u64_x(__VA_ARGS__)
+#define svmax_n_u16_x(...) __builtin_sve_svmax_n_u16_x(__VA_ARGS__)
+#define svmax_n_u8_z(...) __builtin_sve_svmax_n_u8_z(__VA_ARGS__)
+#define svmax_n_u32_z(...) __builtin_sve_svmax_n_u32_z(__VA_ARGS__)
+#define svmax_n_u64_z(...) __builtin_sve_svmax_n_u64_z(__VA_ARGS__)
+#define svmax_n_u16_z(...) __builtin_sve_svmax_n_u16_z(__VA_ARGS__)
+#define svmax_f64_m(...) __builtin_sve_svmax_f64_m(__VA_ARGS__)
+#define svmax_f32_m(...) __builtin_sve_svmax_f32_m(__VA_ARGS__)
+#define svmax_f16_m(...) __builtin_sve_svmax_f16_m(__VA_ARGS__)
+#define svmax_f64_x(...) __builtin_sve_svmax_f64_x(__VA_ARGS__)
+#define svmax_f32_x(...) __builtin_sve_svmax_f32_x(__VA_ARGS__)
+#define svmax_f16_x(...) __builtin_sve_svmax_f16_x(__VA_ARGS__)
+#define svmax_f64_z(...) __builtin_sve_svmax_f64_z(__VA_ARGS__)
+#define svmax_f32_z(...) __builtin_sve_svmax_f32_z(__VA_ARGS__)
+#define svmax_f16_z(...) __builtin_sve_svmax_f16_z(__VA_ARGS__)
+#define svmax_s8_m(...) __builtin_sve_svmax_s8_m(__VA_ARGS__)
+#define svmax_s32_m(...) __builtin_sve_svmax_s32_m(__VA_ARGS__)
+#define svmax_s64_m(...) __builtin_sve_svmax_s64_m(__VA_ARGS__)
+#define svmax_s16_m(...) __builtin_sve_svmax_s16_m(__VA_ARGS__)
+#define svmax_s8_x(...) __builtin_sve_svmax_s8_x(__VA_ARGS__)
+#define svmax_s32_x(...) __builtin_sve_svmax_s32_x(__VA_ARGS__)
+#define svmax_s64_x(...) __builtin_sve_svmax_s64_x(__VA_ARGS__)
+#define svmax_s16_x(...) __builtin_sve_svmax_s16_x(__VA_ARGS__)
+#define svmax_s8_z(...) __builtin_sve_svmax_s8_z(__VA_ARGS__)
+#define svmax_s32_z(...) __builtin_sve_svmax_s32_z(__VA_ARGS__)
+#define svmax_s64_z(...) __builtin_sve_svmax_s64_z(__VA_ARGS__)
+#define svmax_s16_z(...) __builtin_sve_svmax_s16_z(__VA_ARGS__)
+#define svmax_u8_m(...) __builtin_sve_svmax_u8_m(__VA_ARGS__)
+#define svmax_u32_m(...) __builtin_sve_svmax_u32_m(__VA_ARGS__)
+#define svmax_u64_m(...) __builtin_sve_svmax_u64_m(__VA_ARGS__)
+#define svmax_u16_m(...) __builtin_sve_svmax_u16_m(__VA_ARGS__)
+#define svmax_u8_x(...) __builtin_sve_svmax_u8_x(__VA_ARGS__)
+#define svmax_u32_x(...) __builtin_sve_svmax_u32_x(__VA_ARGS__)
+#define svmax_u64_x(...) __builtin_sve_svmax_u64_x(__VA_ARGS__)
+#define svmax_u16_x(...) __builtin_sve_svmax_u16_x(__VA_ARGS__)
+#define svmax_u8_z(...) __builtin_sve_svmax_u8_z(__VA_ARGS__)
+#define svmax_u32_z(...) __builtin_sve_svmax_u32_z(__VA_ARGS__)
+#define svmax_u64_z(...) __builtin_sve_svmax_u64_z(__VA_ARGS__)
+#define svmax_u16_z(...) __builtin_sve_svmax_u16_z(__VA_ARGS__)
+#define svmaxnm_n_f64_m(...) __builtin_sve_svmaxnm_n_f64_m(__VA_ARGS__)
+#define svmaxnm_n_f32_m(...) __builtin_sve_svmaxnm_n_f32_m(__VA_ARGS__)
+#define svmaxnm_n_f16_m(...) __builtin_sve_svmaxnm_n_f16_m(__VA_ARGS__)
+#define svmaxnm_n_f64_x(...) __builtin_sve_svmaxnm_n_f64_x(__VA_ARGS__)
+#define svmaxnm_n_f32_x(...) __builtin_sve_svmaxnm_n_f32_x(__VA_ARGS__)
+#define svmaxnm_n_f16_x(...) __builtin_sve_svmaxnm_n_f16_x(__VA_ARGS__)
+#define svmaxnm_n_f64_z(...) __builtin_sve_svmaxnm_n_f64_z(__VA_ARGS__)
+#define svmaxnm_n_f32_z(...) __builtin_sve_svmaxnm_n_f32_z(__VA_ARGS__)
+#define svmaxnm_n_f16_z(...) __builtin_sve_svmaxnm_n_f16_z(__VA_ARGS__)
+#define svmaxnm_f64_m(...) __builtin_sve_svmaxnm_f64_m(__VA_ARGS__)
+#define svmaxnm_f32_m(...) __builtin_sve_svmaxnm_f32_m(__VA_ARGS__)
+#define svmaxnm_f16_m(...) __builtin_sve_svmaxnm_f16_m(__VA_ARGS__)
+#define svmaxnm_f64_x(...) __builtin_sve_svmaxnm_f64_x(__VA_ARGS__)
+#define svmaxnm_f32_x(...) __builtin_sve_svmaxnm_f32_x(__VA_ARGS__)
+#define svmaxnm_f16_x(...) __builtin_sve_svmaxnm_f16_x(__VA_ARGS__)
+#define svmaxnm_f64_z(...) __builtin_sve_svmaxnm_f64_z(__VA_ARGS__)
+#define svmaxnm_f32_z(...) __builtin_sve_svmaxnm_f32_z(__VA_ARGS__)
+#define svmaxnm_f16_z(...) __builtin_sve_svmaxnm_f16_z(__VA_ARGS__)
+#define svmaxnmv_f64(...) __builtin_sve_svmaxnmv_f64(__VA_ARGS__)
+#define svmaxnmv_f32(...) __builtin_sve_svmaxnmv_f32(__VA_ARGS__)
+#define svmaxnmv_f16(...) __builtin_sve_svmaxnmv_f16(__VA_ARGS__)
+#define svmaxv_f64(...) __builtin_sve_svmaxv_f64(__VA_ARGS__)
+#define svmaxv_f32(...) __builtin_sve_svmaxv_f32(__VA_ARGS__)
+#define svmaxv_f16(...) __builtin_sve_svmaxv_f16(__VA_ARGS__)
+#define svmaxv_s8(...) __builtin_sve_svmaxv_s8(__VA_ARGS__)
+#define svmaxv_s32(...) __builtin_sve_svmaxv_s32(__VA_ARGS__)
+#define svmaxv_s64(...) __builtin_sve_svmaxv_s64(__VA_ARGS__)
+#define svmaxv_s16(...) __builtin_sve_svmaxv_s16(__VA_ARGS__)
+#define svmaxv_u8(...) __builtin_sve_svmaxv_u8(__VA_ARGS__)
+#define svmaxv_u32(...) __builtin_sve_svmaxv_u32(__VA_ARGS__)
+#define svmaxv_u64(...) __builtin_sve_svmaxv_u64(__VA_ARGS__)
+#define svmaxv_u16(...) __builtin_sve_svmaxv_u16(__VA_ARGS__)
+#define svmin_n_f64_m(...) __builtin_sve_svmin_n_f64_m(__VA_ARGS__)
+#define svmin_n_f32_m(...) __builtin_sve_svmin_n_f32_m(__VA_ARGS__)
+#define svmin_n_f16_m(...) __builtin_sve_svmin_n_f16_m(__VA_ARGS__)
+#define svmin_n_f64_x(...) __builtin_sve_svmin_n_f64_x(__VA_ARGS__)
+#define svmin_n_f32_x(...) __builtin_sve_svmin_n_f32_x(__VA_ARGS__)
+#define svmin_n_f16_x(...) __builtin_sve_svmin_n_f16_x(__VA_ARGS__)
+#define svmin_n_f64_z(...) __builtin_sve_svmin_n_f64_z(__VA_ARGS__)
+#define svmin_n_f32_z(...) __builtin_sve_svmin_n_f32_z(__VA_ARGS__)
+#define svmin_n_f16_z(...) __builtin_sve_svmin_n_f16_z(__VA_ARGS__)
+#define svmin_n_s8_m(...) __builtin_sve_svmin_n_s8_m(__VA_ARGS__)
+#define svmin_n_s32_m(...) __builtin_sve_svmin_n_s32_m(__VA_ARGS__)
+#define svmin_n_s64_m(...) __builtin_sve_svmin_n_s64_m(__VA_ARGS__)
+#define svmin_n_s16_m(...) __builtin_sve_svmin_n_s16_m(__VA_ARGS__)
+#define svmin_n_s8_x(...) __builtin_sve_svmin_n_s8_x(__VA_ARGS__)
+#define svmin_n_s32_x(...) __builtin_sve_svmin_n_s32_x(__VA_ARGS__)
+#define svmin_n_s64_x(...) __builtin_sve_svmin_n_s64_x(__VA_ARGS__)
+#define svmin_n_s16_x(...) __builtin_sve_svmin_n_s16_x(__VA_ARGS__)
+#define svmin_n_s8_z(...) __builtin_sve_svmin_n_s8_z(__VA_ARGS__)
+#define svmin_n_s32_z(...) __builtin_sve_svmin_n_s32_z(__VA_ARGS__)
+#define svmin_n_s64_z(...) __builtin_sve_svmin_n_s64_z(__VA_ARGS__)
+#define svmin_n_s16_z(...) __builtin_sve_svmin_n_s16_z(__VA_ARGS__)
+#define svmin_n_u8_m(...) __builtin_sve_svmin_n_u8_m(__VA_ARGS__)
+#define svmin_n_u32_m(...) __builtin_sve_svmin_n_u32_m(__VA_ARGS__)
+#define svmin_n_u64_m(...) __builtin_sve_svmin_n_u64_m(__VA_ARGS__)
+#define svmin_n_u16_m(...) __builtin_sve_svmin_n_u16_m(__VA_ARGS__)
+#define svmin_n_u8_x(...) __builtin_sve_svmin_n_u8_x(__VA_ARGS__)
+#define svmin_n_u32_x(...) __builtin_sve_svmin_n_u32_x(__VA_ARGS__)
+#define svmin_n_u64_x(...) __builtin_sve_svmin_n_u64_x(__VA_ARGS__)
+#define svmin_n_u16_x(...) __builtin_sve_svmin_n_u16_x(__VA_ARGS__)
+#define svmin_n_u8_z(...) __builtin_sve_svmin_n_u8_z(__VA_ARGS__)
+#define svmin_n_u32_z(...) __builtin_sve_svmin_n_u32_z(__VA_ARGS__)
+#define svmin_n_u64_z(...) __builtin_sve_svmin_n_u64_z(__VA_ARGS__)
+#define svmin_n_u16_z(...) __builtin_sve_svmin_n_u16_z(__VA_ARGS__)
+#define svmin_f64_m(...) __builtin_sve_svmin_f64_m(__VA_ARGS__)
+#define svmin_f32_m(...) __builtin_sve_svmin_f32_m(__VA_ARGS__)
+#define svmin_f16_m(...) __builtin_sve_svmin_f16_m(__VA_ARGS__)
+#define svmin_f64_x(...) __builtin_sve_svmin_f64_x(__VA_ARGS__)
+#define svmin_f32_x(...) __builtin_sve_svmin_f32_x(__VA_ARGS__)
+#define svmin_f16_x(...) __builtin_sve_svmin_f16_x(__VA_ARGS__)
+#define svmin_f64_z(...) __builtin_sve_svmin_f64_z(__VA_ARGS__)
+#define svmin_f32_z(...) __builtin_sve_svmin_f32_z(__VA_ARGS__)
+#define svmin_f16_z(...) __builtin_sve_svmin_f16_z(__VA_ARGS__)
+#define svmin_s8_m(...) __builtin_sve_svmin_s8_m(__VA_ARGS__)
+#define svmin_s32_m(...) __builtin_sve_svmin_s32_m(__VA_ARGS__)
+#define svmin_s64_m(...) __builtin_sve_svmin_s64_m(__VA_ARGS__)
+#define svmin_s16_m(...) __builtin_sve_svmin_s16_m(__VA_ARGS__)
+#define svmin_s8_x(...) __builtin_sve_svmin_s8_x(__VA_ARGS__)
+#define svmin_s32_x(...) __builtin_sve_svmin_s32_x(__VA_ARGS__)
+#define svmin_s64_x(...) __builtin_sve_svmin_s64_x(__VA_ARGS__)
+#define svmin_s16_x(...) __builtin_sve_svmin_s16_x(__VA_ARGS__)
+#define svmin_s8_z(...) __builtin_sve_svmin_s8_z(__VA_ARGS__)
+#define svmin_s32_z(...) __builtin_sve_svmin_s32_z(__VA_ARGS__)
+#define svmin_s64_z(...) __builtin_sve_svmin_s64_z(__VA_ARGS__)
+#define svmin_s16_z(...) __builtin_sve_svmin_s16_z(__VA_ARGS__)
+#define svmin_u8_m(...) __builtin_sve_svmin_u8_m(__VA_ARGS__)
+#define svmin_u32_m(...) __builtin_sve_svmin_u32_m(__VA_ARGS__)
+#define svmin_u64_m(...) __builtin_sve_svmin_u64_m(__VA_ARGS__)
+#define svmin_u16_m(...) __builtin_sve_svmin_u16_m(__VA_ARGS__)
+#define svmin_u8_x(...) __builtin_sve_svmin_u8_x(__VA_ARGS__)
+#define svmin_u32_x(...) __builtin_sve_svmin_u32_x(__VA_ARGS__)
+#define svmin_u64_x(...) __builtin_sve_svmin_u64_x(__VA_ARGS__)
+#define svmin_u16_x(...) __builtin_sve_svmin_u16_x(__VA_ARGS__)
+#define svmin_u8_z(...) __builtin_sve_svmin_u8_z(__VA_ARGS__)
+#define svmin_u32_z(...) __builtin_sve_svmin_u32_z(__VA_ARGS__)
+#define svmin_u64_z(...) __builtin_sve_svmin_u64_z(__VA_ARGS__)
+#define svmin_u16_z(...) __builtin_sve_svmin_u16_z(__VA_ARGS__)
+#define svminnm_n_f64_m(...) __builtin_sve_svminnm_n_f64_m(__VA_ARGS__)
+#define svminnm_n_f32_m(...) __builtin_sve_svminnm_n_f32_m(__VA_ARGS__)
+#define svminnm_n_f16_m(...) __builtin_sve_svminnm_n_f16_m(__VA_ARGS__)
+#define svminnm_n_f64_x(...) __builtin_sve_svminnm_n_f64_x(__VA_ARGS__)
+#define svminnm_n_f32_x(...) __builtin_sve_svminnm_n_f32_x(__VA_ARGS__)
+#define svminnm_n_f16_x(...) __builtin_sve_svminnm_n_f16_x(__VA_ARGS__)
+#define svminnm_n_f64_z(...) __builtin_sve_svminnm_n_f64_z(__VA_ARGS__)
+#define svminnm_n_f32_z(...) __builtin_sve_svminnm_n_f32_z(__VA_ARGS__)
+#define svminnm_n_f16_z(...) __builtin_sve_svminnm_n_f16_z(__VA_ARGS__)
+#define svminnm_f64_m(...) __builtin_sve_svminnm_f64_m(__VA_ARGS__)
+#define svminnm_f32_m(...) __builtin_sve_svminnm_f32_m(__VA_ARGS__)
+#define svminnm_f16_m(...) __builtin_sve_svminnm_f16_m(__VA_ARGS__)
+#define svminnm_f64_x(...) __builtin_sve_svminnm_f64_x(__VA_ARGS__)
+#define svminnm_f32_x(...) __builtin_sve_svminnm_f32_x(__VA_ARGS__)
+#define svminnm_f16_x(...) __builtin_sve_svminnm_f16_x(__VA_ARGS__)
+#define svminnm_f64_z(...) __builtin_sve_svminnm_f64_z(__VA_ARGS__)
+#define svminnm_f32_z(...) __builtin_sve_svminnm_f32_z(__VA_ARGS__)
+#define svminnm_f16_z(...) __builtin_sve_svminnm_f16_z(__VA_ARGS__)
+#define svminnmv_f64(...) __builtin_sve_svminnmv_f64(__VA_ARGS__)
+#define svminnmv_f32(...) __builtin_sve_svminnmv_f32(__VA_ARGS__)
+#define svminnmv_f16(...) __builtin_sve_svminnmv_f16(__VA_ARGS__)
+#define svminv_f64(...) __builtin_sve_svminv_f64(__VA_ARGS__)
+#define svminv_f32(...) __builtin_sve_svminv_f32(__VA_ARGS__)
+#define svminv_f16(...) __builtin_sve_svminv_f16(__VA_ARGS__)
+#define svminv_s8(...) __builtin_sve_svminv_s8(__VA_ARGS__)
+#define svminv_s32(...) __builtin_sve_svminv_s32(__VA_ARGS__)
+#define svminv_s64(...) __builtin_sve_svminv_s64(__VA_ARGS__)
+#define svminv_s16(...) __builtin_sve_svminv_s16(__VA_ARGS__)
+#define svminv_u8(...) __builtin_sve_svminv_u8(__VA_ARGS__)
+#define svminv_u32(...) __builtin_sve_svminv_u32(__VA_ARGS__)
+#define svminv_u64(...) __builtin_sve_svminv_u64(__VA_ARGS__)
+#define svminv_u16(...) __builtin_sve_svminv_u16(__VA_ARGS__)
+#define svmla_n_f64_m(...) __builtin_sve_svmla_n_f64_m(__VA_ARGS__)
+#define svmla_n_f32_m(...) __builtin_sve_svmla_n_f32_m(__VA_ARGS__)
+#define svmla_n_f16_m(...) __builtin_sve_svmla_n_f16_m(__VA_ARGS__)
+#define svmla_n_f64_x(...) __builtin_sve_svmla_n_f64_x(__VA_ARGS__)
+#define svmla_n_f32_x(...) __builtin_sve_svmla_n_f32_x(__VA_ARGS__)
+#define svmla_n_f16_x(...) __builtin_sve_svmla_n_f16_x(__VA_ARGS__)
+#define svmla_n_f64_z(...) __builtin_sve_svmla_n_f64_z(__VA_ARGS__)
+#define svmla_n_f32_z(...) __builtin_sve_svmla_n_f32_z(__VA_ARGS__)
+#define svmla_n_f16_z(...) __builtin_sve_svmla_n_f16_z(__VA_ARGS__)
+#define svmla_n_u8_m(...) __builtin_sve_svmla_n_u8_m(__VA_ARGS__)
+#define svmla_n_u32_m(...) __builtin_sve_svmla_n_u32_m(__VA_ARGS__)
+#define svmla_n_u64_m(...) __builtin_sve_svmla_n_u64_m(__VA_ARGS__)
+#define svmla_n_u16_m(...) __builtin_sve_svmla_n_u16_m(__VA_ARGS__)
+#define svmla_n_s8_m(...) __builtin_sve_svmla_n_s8_m(__VA_ARGS__)
+#define svmla_n_s32_m(...) __builtin_sve_svmla_n_s32_m(__VA_ARGS__)
+#define svmla_n_s64_m(...) __builtin_sve_svmla_n_s64_m(__VA_ARGS__)
+#define svmla_n_s16_m(...) __builtin_sve_svmla_n_s16_m(__VA_ARGS__)
+#define svmla_n_u8_x(...) __builtin_sve_svmla_n_u8_x(__VA_ARGS__)
+#define svmla_n_u32_x(...) __builtin_sve_svmla_n_u32_x(__VA_ARGS__)
+#define svmla_n_u64_x(...) __builtin_sve_svmla_n_u64_x(__VA_ARGS__)
+#define svmla_n_u16_x(...) __builtin_sve_svmla_n_u16_x(__VA_ARGS__)
+#define svmla_n_s8_x(...) __builtin_sve_svmla_n_s8_x(__VA_ARGS__)
+#define svmla_n_s32_x(...) __builtin_sve_svmla_n_s32_x(__VA_ARGS__)
+#define svmla_n_s64_x(...) __builtin_sve_svmla_n_s64_x(__VA_ARGS__)
+#define svmla_n_s16_x(...) __builtin_sve_svmla_n_s16_x(__VA_ARGS__)
+#define svmla_n_u8_z(...) __builtin_sve_svmla_n_u8_z(__VA_ARGS__)
+#define svmla_n_u32_z(...) __builtin_sve_svmla_n_u32_z(__VA_ARGS__)
+#define svmla_n_u64_z(...) __builtin_sve_svmla_n_u64_z(__VA_ARGS__)
+#define svmla_n_u16_z(...) __builtin_sve_svmla_n_u16_z(__VA_ARGS__)
+#define svmla_n_s8_z(...) __builtin_sve_svmla_n_s8_z(__VA_ARGS__)
+#define svmla_n_s32_z(...) __builtin_sve_svmla_n_s32_z(__VA_ARGS__)
+#define svmla_n_s64_z(...) __builtin_sve_svmla_n_s64_z(__VA_ARGS__)
+#define svmla_n_s16_z(...) __builtin_sve_svmla_n_s16_z(__VA_ARGS__)
+#define svmla_f64_m(...) __builtin_sve_svmla_f64_m(__VA_ARGS__)
+#define svmla_f32_m(...) __builtin_sve_svmla_f32_m(__VA_ARGS__)
+#define svmla_f16_m(...) __builtin_sve_svmla_f16_m(__VA_ARGS__)
+#define svmla_f64_x(...) __builtin_sve_svmla_f64_x(__VA_ARGS__)
+#define svmla_f32_x(...) __builtin_sve_svmla_f32_x(__VA_ARGS__)
+#define svmla_f16_x(...) __builtin_sve_svmla_f16_x(__VA_ARGS__)
+#define svmla_f64_z(...) __builtin_sve_svmla_f64_z(__VA_ARGS__)
+#define svmla_f32_z(...) __builtin_sve_svmla_f32_z(__VA_ARGS__)
+#define svmla_f16_z(...) __builtin_sve_svmla_f16_z(__VA_ARGS__)
+#define svmla_u8_m(...) __builtin_sve_svmla_u8_m(__VA_ARGS__)
+#define svmla_u32_m(...) __builtin_sve_svmla_u32_m(__VA_ARGS__)
+#define svmla_u64_m(...) __builtin_sve_svmla_u64_m(__VA_ARGS__)
+#define svmla_u16_m(...) __builtin_sve_svmla_u16_m(__VA_ARGS__)
+#define svmla_s8_m(...) __builtin_sve_svmla_s8_m(__VA_ARGS__)
+#define svmla_s32_m(...) __builtin_sve_svmla_s32_m(__VA_ARGS__)
+#define svmla_s64_m(...) __builtin_sve_svmla_s64_m(__VA_ARGS__)
+#define svmla_s16_m(...) __builtin_sve_svmla_s16_m(__VA_ARGS__)
+#define svmla_u8_x(...) __builtin_sve_svmla_u8_x(__VA_ARGS__)
+#define svmla_u32_x(...) __builtin_sve_svmla_u32_x(__VA_ARGS__)
+#define svmla_u64_x(...) __builtin_sve_svmla_u64_x(__VA_ARGS__)
+#define svmla_u16_x(...) __builtin_sve_svmla_u16_x(__VA_ARGS__)
+#define svmla_s8_x(...) __builtin_sve_svmla_s8_x(__VA_ARGS__)
+#define svmla_s32_x(...) __builtin_sve_svmla_s32_x(__VA_ARGS__)
+#define svmla_s64_x(...) __builtin_sve_svmla_s64_x(__VA_ARGS__)
+#define svmla_s16_x(...) __builtin_sve_svmla_s16_x(__VA_ARGS__)
+#define svmla_u8_z(...) __builtin_sve_svmla_u8_z(__VA_ARGS__)
+#define svmla_u32_z(...) __builtin_sve_svmla_u32_z(__VA_ARGS__)
+#define svmla_u64_z(...) __builtin_sve_svmla_u64_z(__VA_ARGS__)
+#define svmla_u16_z(...) __builtin_sve_svmla_u16_z(__VA_ARGS__)
+#define svmla_s8_z(...) __builtin_sve_svmla_s8_z(__VA_ARGS__)
+#define svmla_s32_z(...) __builtin_sve_svmla_s32_z(__VA_ARGS__)
+#define svmla_s64_z(...) __builtin_sve_svmla_s64_z(__VA_ARGS__)
+#define svmla_s16_z(...) __builtin_sve_svmla_s16_z(__VA_ARGS__)
+#define svmla_lane_f64(...) __builtin_sve_svmla_lane_f64(__VA_ARGS__)
+#define svmla_lane_f32(...) __builtin_sve_svmla_lane_f32(__VA_ARGS__)
+#define svmla_lane_f16(...) __builtin_sve_svmla_lane_f16(__VA_ARGS__)
+#define svmls_n_f64_m(...) __builtin_sve_svmls_n_f64_m(__VA_ARGS__)
+#define svmls_n_f32_m(...) __builtin_sve_svmls_n_f32_m(__VA_ARGS__)
+#define svmls_n_f16_m(...) __builtin_sve_svmls_n_f16_m(__VA_ARGS__)
+#define svmls_n_f64_x(...) __builtin_sve_svmls_n_f64_x(__VA_ARGS__)
+#define svmls_n_f32_x(...) __builtin_sve_svmls_n_f32_x(__VA_ARGS__)
+#define svmls_n_f16_x(...) __builtin_sve_svmls_n_f16_x(__VA_ARGS__)
+#define svmls_n_f64_z(...) __builtin_sve_svmls_n_f64_z(__VA_ARGS__)
+#define svmls_n_f32_z(...) __builtin_sve_svmls_n_f32_z(__VA_ARGS__)
+#define svmls_n_f16_z(...) __builtin_sve_svmls_n_f16_z(__VA_ARGS__)
+#define svmls_n_u8_m(...) __builtin_sve_svmls_n_u8_m(__VA_ARGS__)
+#define svmls_n_u32_m(...) __builtin_sve_svmls_n_u32_m(__VA_ARGS__)
+#define svmls_n_u64_m(...) __builtin_sve_svmls_n_u64_m(__VA_ARGS__)
+#define svmls_n_u16_m(...) __builtin_sve_svmls_n_u16_m(__VA_ARGS__)
+#define svmls_n_s8_m(...) __builtin_sve_svmls_n_s8_m(__VA_ARGS__)
+#define svmls_n_s32_m(...) __builtin_sve_svmls_n_s32_m(__VA_ARGS__)
+#define svmls_n_s64_m(...) __builtin_sve_svmls_n_s64_m(__VA_ARGS__)
+#define svmls_n_s16_m(...) __builtin_sve_svmls_n_s16_m(__VA_ARGS__)
+#define svmls_n_u8_x(...) __builtin_sve_svmls_n_u8_x(__VA_ARGS__)
+#define svmls_n_u32_x(...) __builtin_sve_svmls_n_u32_x(__VA_ARGS__)
+#define svmls_n_u64_x(...) __builtin_sve_svmls_n_u64_x(__VA_ARGS__)
+#define svmls_n_u16_x(...) __builtin_sve_svmls_n_u16_x(__VA_ARGS__)
+#define svmls_n_s8_x(...) __builtin_sve_svmls_n_s8_x(__VA_ARGS__)
+#define svmls_n_s32_x(...) __builtin_sve_svmls_n_s32_x(__VA_ARGS__)
+#define svmls_n_s64_x(...) __builtin_sve_svmls_n_s64_x(__VA_ARGS__)
+#define svmls_n_s16_x(...) __builtin_sve_svmls_n_s16_x(__VA_ARGS__)
+#define svmls_n_u8_z(...) __builtin_sve_svmls_n_u8_z(__VA_ARGS__)
+#define svmls_n_u32_z(...) __builtin_sve_svmls_n_u32_z(__VA_ARGS__)
+#define svmls_n_u64_z(...) __builtin_sve_svmls_n_u64_z(__VA_ARGS__)
+#define svmls_n_u16_z(...) __builtin_sve_svmls_n_u16_z(__VA_ARGS__)
+#define svmls_n_s8_z(...) __builtin_sve_svmls_n_s8_z(__VA_ARGS__)
+#define svmls_n_s32_z(...) __builtin_sve_svmls_n_s32_z(__VA_ARGS__)
+#define svmls_n_s64_z(...) __builtin_sve_svmls_n_s64_z(__VA_ARGS__)
+#define svmls_n_s16_z(...) __builtin_sve_svmls_n_s16_z(__VA_ARGS__)
+#define svmls_f64_m(...) __builtin_sve_svmls_f64_m(__VA_ARGS__)
+#define svmls_f32_m(...) __builtin_sve_svmls_f32_m(__VA_ARGS__)
+#define svmls_f16_m(...) __builtin_sve_svmls_f16_m(__VA_ARGS__)
+#define svmls_f64_x(...) __builtin_sve_svmls_f64_x(__VA_ARGS__)
+#define svmls_f32_x(...) __builtin_sve_svmls_f32_x(__VA_ARGS__)
+#define svmls_f16_x(...) __builtin_sve_svmls_f16_x(__VA_ARGS__)
+#define svmls_f64_z(...) __builtin_sve_svmls_f64_z(__VA_ARGS__)
+#define svmls_f32_z(...) __builtin_sve_svmls_f32_z(__VA_ARGS__)
+#define svmls_f16_z(...) __builtin_sve_svmls_f16_z(__VA_ARGS__)
+#define svmls_u8_m(...) __builtin_sve_svmls_u8_m(__VA_ARGS__)
+#define svmls_u32_m(...) __builtin_sve_svmls_u32_m(__VA_ARGS__)
+#define svmls_u64_m(...) __builtin_sve_svmls_u64_m(__VA_ARGS__)
+#define svmls_u16_m(...) __builtin_sve_svmls_u16_m(__VA_ARGS__)
+#define svmls_s8_m(...) __builtin_sve_svmls_s8_m(__VA_ARGS__)
+#define svmls_s32_m(...) __builtin_sve_svmls_s32_m(__VA_ARGS__)
+#define svmls_s64_m(...) __builtin_sve_svmls_s64_m(__VA_ARGS__)
+#define svmls_s16_m(...) __builtin_sve_svmls_s16_m(__VA_ARGS__)
+#define svmls_u8_x(...) __builtin_sve_svmls_u8_x(__VA_ARGS__)
+#define svmls_u32_x(...) __builtin_sve_svmls_u32_x(__VA_ARGS__)
+#define svmls_u64_x(...) __builtin_sve_svmls_u64_x(__VA_ARGS__)
+#define svmls_u16_x(...) __builtin_sve_svmls_u16_x(__VA_ARGS__)
+#define svmls_s8_x(...) __builtin_sve_svmls_s8_x(__VA_ARGS__)
+#define svmls_s32_x(...) __builtin_sve_svmls_s32_x(__VA_ARGS__)
+#define svmls_s64_x(...) __builtin_sve_svmls_s64_x(__VA_ARGS__)
+#define svmls_s16_x(...) __builtin_sve_svmls_s16_x(__VA_ARGS__)
+#define svmls_u8_z(...) __builtin_sve_svmls_u8_z(__VA_ARGS__)
+#define svmls_u32_z(...) __builtin_sve_svmls_u32_z(__VA_ARGS__)
+#define svmls_u64_z(...) __builtin_sve_svmls_u64_z(__VA_ARGS__)
+#define svmls_u16_z(...) __builtin_sve_svmls_u16_z(__VA_ARGS__)
+#define svmls_s8_z(...) __builtin_sve_svmls_s8_z(__VA_ARGS__)
+#define svmls_s32_z(...) __builtin_sve_svmls_s32_z(__VA_ARGS__)
+#define svmls_s64_z(...) __builtin_sve_svmls_s64_z(__VA_ARGS__)
+#define svmls_s16_z(...) __builtin_sve_svmls_s16_z(__VA_ARGS__)
+#define svmls_lane_f64(...) __builtin_sve_svmls_lane_f64(__VA_ARGS__)
+#define svmls_lane_f32(...) __builtin_sve_svmls_lane_f32(__VA_ARGS__)
+#define svmls_lane_f16(...) __builtin_sve_svmls_lane_f16(__VA_ARGS__)
+#define svmov_b_z(...) __builtin_sve_svmov_b_z(__VA_ARGS__)
+#define svmsb_n_f64_m(...) __builtin_sve_svmsb_n_f64_m(__VA_ARGS__)
+#define svmsb_n_f32_m(...) __builtin_sve_svmsb_n_f32_m(__VA_ARGS__)
+#define svmsb_n_f16_m(...) __builtin_sve_svmsb_n_f16_m(__VA_ARGS__)
+#define svmsb_n_f64_x(...) __builtin_sve_svmsb_n_f64_x(__VA_ARGS__)
+#define svmsb_n_f32_x(...) __builtin_sve_svmsb_n_f32_x(__VA_ARGS__)
+#define svmsb_n_f16_x(...) __builtin_sve_svmsb_n_f16_x(__VA_ARGS__)
+#define svmsb_n_f64_z(...) __builtin_sve_svmsb_n_f64_z(__VA_ARGS__)
+#define svmsb_n_f32_z(...) __builtin_sve_svmsb_n_f32_z(__VA_ARGS__)
+#define svmsb_n_f16_z(...) __builtin_sve_svmsb_n_f16_z(__VA_ARGS__)
+#define svmsb_n_u8_m(...) __builtin_sve_svmsb_n_u8_m(__VA_ARGS__)
+#define svmsb_n_u32_m(...) __builtin_sve_svmsb_n_u32_m(__VA_ARGS__)
+#define svmsb_n_u64_m(...) __builtin_sve_svmsb_n_u64_m(__VA_ARGS__)
+#define svmsb_n_u16_m(...) __builtin_sve_svmsb_n_u16_m(__VA_ARGS__)
+#define svmsb_n_s8_m(...) __builtin_sve_svmsb_n_s8_m(__VA_ARGS__)
+#define svmsb_n_s32_m(...) __builtin_sve_svmsb_n_s32_m(__VA_ARGS__)
+#define svmsb_n_s64_m(...) __builtin_sve_svmsb_n_s64_m(__VA_ARGS__)
+#define svmsb_n_s16_m(...) __builtin_sve_svmsb_n_s16_m(__VA_ARGS__)
+#define svmsb_n_u8_x(...) __builtin_sve_svmsb_n_u8_x(__VA_ARGS__)
+#define svmsb_n_u32_x(...) __builtin_sve_svmsb_n_u32_x(__VA_ARGS__)
+#define svmsb_n_u64_x(...) __builtin_sve_svmsb_n_u64_x(__VA_ARGS__)
+#define svmsb_n_u16_x(...) __builtin_sve_svmsb_n_u16_x(__VA_ARGS__)
+#define svmsb_n_s8_x(...) __builtin_sve_svmsb_n_s8_x(__VA_ARGS__)
+#define svmsb_n_s32_x(...) __builtin_sve_svmsb_n_s32_x(__VA_ARGS__)
+#define svmsb_n_s64_x(...) __builtin_sve_svmsb_n_s64_x(__VA_ARGS__)
+#define svmsb_n_s16_x(...) __builtin_sve_svmsb_n_s16_x(__VA_ARGS__)
+#define svmsb_n_u8_z(...) __builtin_sve_svmsb_n_u8_z(__VA_ARGS__)
+#define svmsb_n_u32_z(...) __builtin_sve_svmsb_n_u32_z(__VA_ARGS__)
+#define svmsb_n_u64_z(...) __builtin_sve_svmsb_n_u64_z(__VA_ARGS__)
+#define svmsb_n_u16_z(...) __builtin_sve_svmsb_n_u16_z(__VA_ARGS__)
+#define svmsb_n_s8_z(...) __builtin_sve_svmsb_n_s8_z(__VA_ARGS__)
+#define svmsb_n_s32_z(...) __builtin_sve_svmsb_n_s32_z(__VA_ARGS__)
+#define svmsb_n_s64_z(...) __builtin_sve_svmsb_n_s64_z(__VA_ARGS__)
+#define svmsb_n_s16_z(...) __builtin_sve_svmsb_n_s16_z(__VA_ARGS__)
+#define svmsb_f64_m(...) __builtin_sve_svmsb_f64_m(__VA_ARGS__)
+#define svmsb_f32_m(...) __builtin_sve_svmsb_f32_m(__VA_ARGS__)
+#define svmsb_f16_m(...) __builtin_sve_svmsb_f16_m(__VA_ARGS__)
+#define svmsb_f64_x(...) __builtin_sve_svmsb_f64_x(__VA_ARGS__)
+#define svmsb_f32_x(...) __builtin_sve_svmsb_f32_x(__VA_ARGS__)
+#define svmsb_f16_x(...) __builtin_sve_svmsb_f16_x(__VA_ARGS__)
+#define svmsb_f64_z(...) __builtin_sve_svmsb_f64_z(__VA_ARGS__)
+#define svmsb_f32_z(...) __builtin_sve_svmsb_f32_z(__VA_ARGS__)
+#define svmsb_f16_z(...) __builtin_sve_svmsb_f16_z(__VA_ARGS__)
+#define svmsb_u8_m(...) __builtin_sve_svmsb_u8_m(__VA_ARGS__)
+#define svmsb_u32_m(...) __builtin_sve_svmsb_u32_m(__VA_ARGS__)
+#define svmsb_u64_m(...) __builtin_sve_svmsb_u64_m(__VA_ARGS__)
+#define svmsb_u16_m(...) __builtin_sve_svmsb_u16_m(__VA_ARGS__)
+#define svmsb_s8_m(...) __builtin_sve_svmsb_s8_m(__VA_ARGS__)
+#define svmsb_s32_m(...) __builtin_sve_svmsb_s32_m(__VA_ARGS__)
+#define svmsb_s64_m(...) __builtin_sve_svmsb_s64_m(__VA_ARGS__)
+#define svmsb_s16_m(...) __builtin_sve_svmsb_s16_m(__VA_ARGS__)
+#define svmsb_u8_x(...) __builtin_sve_svmsb_u8_x(__VA_ARGS__)
+#define svmsb_u32_x(...) __builtin_sve_svmsb_u32_x(__VA_ARGS__)
+#define svmsb_u64_x(...) __builtin_sve_svmsb_u64_x(__VA_ARGS__)
+#define svmsb_u16_x(...) __builtin_sve_svmsb_u16_x(__VA_ARGS__)
+#define svmsb_s8_x(...) __builtin_sve_svmsb_s8_x(__VA_ARGS__)
+#define svmsb_s32_x(...) __builtin_sve_svmsb_s32_x(__VA_ARGS__)
+#define svmsb_s64_x(...) __builtin_sve_svmsb_s64_x(__VA_ARGS__)
+#define svmsb_s16_x(...) __builtin_sve_svmsb_s16_x(__VA_ARGS__)
+#define svmsb_u8_z(...) __builtin_sve_svmsb_u8_z(__VA_ARGS__)
+#define svmsb_u32_z(...) __builtin_sve_svmsb_u32_z(__VA_ARGS__)
+#define svmsb_u64_z(...) __builtin_sve_svmsb_u64_z(__VA_ARGS__)
+#define svmsb_u16_z(...) __builtin_sve_svmsb_u16_z(__VA_ARGS__)
+#define svmsb_s8_z(...) __builtin_sve_svmsb_s8_z(__VA_ARGS__)
+#define svmsb_s32_z(...) __builtin_sve_svmsb_s32_z(__VA_ARGS__)
+#define svmsb_s64_z(...) __builtin_sve_svmsb_s64_z(__VA_ARGS__)
+#define svmsb_s16_z(...) __builtin_sve_svmsb_s16_z(__VA_ARGS__)
+#define svmul_n_f64_m(...) __builtin_sve_svmul_n_f64_m(__VA_ARGS__)
+#define svmul_n_f32_m(...) __builtin_sve_svmul_n_f32_m(__VA_ARGS__)
+#define svmul_n_f16_m(...) __builtin_sve_svmul_n_f16_m(__VA_ARGS__)
+#define svmul_n_f64_x(...) __builtin_sve_svmul_n_f64_x(__VA_ARGS__)
+#define svmul_n_f32_x(...) __builtin_sve_svmul_n_f32_x(__VA_ARGS__)
+#define svmul_n_f16_x(...) __builtin_sve_svmul_n_f16_x(__VA_ARGS__)
+#define svmul_n_f64_z(...) __builtin_sve_svmul_n_f64_z(__VA_ARGS__)
+#define svmul_n_f32_z(...) __builtin_sve_svmul_n_f32_z(__VA_ARGS__)
+#define svmul_n_f16_z(...) __builtin_sve_svmul_n_f16_z(__VA_ARGS__)
+#define svmul_n_u8_m(...) __builtin_sve_svmul_n_u8_m(__VA_ARGS__)
+#define svmul_n_u32_m(...) __builtin_sve_svmul_n_u32_m(__VA_ARGS__)
+#define svmul_n_u64_m(...) __builtin_sve_svmul_n_u64_m(__VA_ARGS__)
+#define svmul_n_u16_m(...) __builtin_sve_svmul_n_u16_m(__VA_ARGS__)
+#define svmul_n_s8_m(...) __builtin_sve_svmul_n_s8_m(__VA_ARGS__)
+#define svmul_n_s32_m(...) __builtin_sve_svmul_n_s32_m(__VA_ARGS__)
+#define svmul_n_s64_m(...) __builtin_sve_svmul_n_s64_m(__VA_ARGS__)
+#define svmul_n_s16_m(...) __builtin_sve_svmul_n_s16_m(__VA_ARGS__)
+#define svmul_n_u8_x(...) __builtin_sve_svmul_n_u8_x(__VA_ARGS__)
+#define svmul_n_u32_x(...) __builtin_sve_svmul_n_u32_x(__VA_ARGS__)
+#define svmul_n_u64_x(...) __builtin_sve_svmul_n_u64_x(__VA_ARGS__)
+#define svmul_n_u16_x(...) __builtin_sve_svmul_n_u16_x(__VA_ARGS__)
+#define svmul_n_s8_x(...) __builtin_sve_svmul_n_s8_x(__VA_ARGS__)
+#define svmul_n_s32_x(...) __builtin_sve_svmul_n_s32_x(__VA_ARGS__)
+#define svmul_n_s64_x(...) __builtin_sve_svmul_n_s64_x(__VA_ARGS__)
+#define svmul_n_s16_x(...) __builtin_sve_svmul_n_s16_x(__VA_ARGS__)
+#define svmul_n_u8_z(...) __builtin_sve_svmul_n_u8_z(__VA_ARGS__)
+#define svmul_n_u32_z(...) __builtin_sve_svmul_n_u32_z(__VA_ARGS__)
+#define svmul_n_u64_z(...) __builtin_sve_svmul_n_u64_z(__VA_ARGS__)
+#define svmul_n_u16_z(...) __builtin_sve_svmul_n_u16_z(__VA_ARGS__)
+#define svmul_n_s8_z(...) __builtin_sve_svmul_n_s8_z(__VA_ARGS__)
+#define svmul_n_s32_z(...) __builtin_sve_svmul_n_s32_z(__VA_ARGS__)
+#define svmul_n_s64_z(...) __builtin_sve_svmul_n_s64_z(__VA_ARGS__)
+#define svmul_n_s16_z(...) __builtin_sve_svmul_n_s16_z(__VA_ARGS__)
+#define svmul_f64_m(...) __builtin_sve_svmul_f64_m(__VA_ARGS__)
+#define svmul_f32_m(...) __builtin_sve_svmul_f32_m(__VA_ARGS__)
+#define svmul_f16_m(...) __builtin_sve_svmul_f16_m(__VA_ARGS__)
+#define svmul_f64_x(...) __builtin_sve_svmul_f64_x(__VA_ARGS__)
+#define svmul_f32_x(...) __builtin_sve_svmul_f32_x(__VA_ARGS__)
+#define svmul_f16_x(...) __builtin_sve_svmul_f16_x(__VA_ARGS__)
+#define svmul_f64_z(...) __builtin_sve_svmul_f64_z(__VA_ARGS__)
+#define svmul_f32_z(...) __builtin_sve_svmul_f32_z(__VA_ARGS__)
+#define svmul_f16_z(...) __builtin_sve_svmul_f16_z(__VA_ARGS__)
+#define svmul_u8_m(...) __builtin_sve_svmul_u8_m(__VA_ARGS__)
+#define svmul_u32_m(...) __builtin_sve_svmul_u32_m(__VA_ARGS__)
+#define svmul_u64_m(...) __builtin_sve_svmul_u64_m(__VA_ARGS__)
+#define svmul_u16_m(...) __builtin_sve_svmul_u16_m(__VA_ARGS__)
+#define svmul_s8_m(...) __builtin_sve_svmul_s8_m(__VA_ARGS__)
+#define svmul_s32_m(...) __builtin_sve_svmul_s32_m(__VA_ARGS__)
+#define svmul_s64_m(...) __builtin_sve_svmul_s64_m(__VA_ARGS__)
+#define svmul_s16_m(...) __builtin_sve_svmul_s16_m(__VA_ARGS__)
+#define svmul_u8_x(...) __builtin_sve_svmul_u8_x(__VA_ARGS__)
+#define svmul_u32_x(...) __builtin_sve_svmul_u32_x(__VA_ARGS__)
+#define svmul_u64_x(...) __builtin_sve_svmul_u64_x(__VA_ARGS__)
+#define svmul_u16_x(...) __builtin_sve_svmul_u16_x(__VA_ARGS__)
+#define svmul_s8_x(...) __builtin_sve_svmul_s8_x(__VA_ARGS__)
+#define svmul_s32_x(...) __builtin_sve_svmul_s32_x(__VA_ARGS__)
+#define svmul_s64_x(...) __builtin_sve_svmul_s64_x(__VA_ARGS__)
+#define svmul_s16_x(...) __builtin_sve_svmul_s16_x(__VA_ARGS__)
+#define svmul_u8_z(...) __builtin_sve_svmul_u8_z(__VA_ARGS__)
+#define svmul_u32_z(...) __builtin_sve_svmul_u32_z(__VA_ARGS__)
+#define svmul_u64_z(...) __builtin_sve_svmul_u64_z(__VA_ARGS__)
+#define svmul_u16_z(...) __builtin_sve_svmul_u16_z(__VA_ARGS__)
+#define svmul_s8_z(...) __builtin_sve_svmul_s8_z(__VA_ARGS__)
+#define svmul_s32_z(...) __builtin_sve_svmul_s32_z(__VA_ARGS__)
+#define svmul_s64_z(...) __builtin_sve_svmul_s64_z(__VA_ARGS__)
+#define svmul_s16_z(...) __builtin_sve_svmul_s16_z(__VA_ARGS__)
+#define svmul_lane_f64(...) __builtin_sve_svmul_lane_f64(__VA_ARGS__)
+#define svmul_lane_f32(...) __builtin_sve_svmul_lane_f32(__VA_ARGS__)
+#define svmul_lane_f16(...) __builtin_sve_svmul_lane_f16(__VA_ARGS__)
+#define svmulh_n_s8_m(...) __builtin_sve_svmulh_n_s8_m(__VA_ARGS__)
+#define svmulh_n_s32_m(...) __builtin_sve_svmulh_n_s32_m(__VA_ARGS__)
+#define svmulh_n_s64_m(...) __builtin_sve_svmulh_n_s64_m(__VA_ARGS__)
+#define svmulh_n_s16_m(...) __builtin_sve_svmulh_n_s16_m(__VA_ARGS__)
+#define svmulh_n_s8_x(...) __builtin_sve_svmulh_n_s8_x(__VA_ARGS__)
+#define svmulh_n_s32_x(...) __builtin_sve_svmulh_n_s32_x(__VA_ARGS__)
+#define svmulh_n_s64_x(...) __builtin_sve_svmulh_n_s64_x(__VA_ARGS__)
+#define svmulh_n_s16_x(...) __builtin_sve_svmulh_n_s16_x(__VA_ARGS__)
+#define svmulh_n_s8_z(...) __builtin_sve_svmulh_n_s8_z(__VA_ARGS__)
+#define svmulh_n_s32_z(...) __builtin_sve_svmulh_n_s32_z(__VA_ARGS__)
+#define svmulh_n_s64_z(...) __builtin_sve_svmulh_n_s64_z(__VA_ARGS__)
+#define svmulh_n_s16_z(...) __builtin_sve_svmulh_n_s16_z(__VA_ARGS__)
+#define svmulh_n_u8_m(...) __builtin_sve_svmulh_n_u8_m(__VA_ARGS__)
+#define svmulh_n_u32_m(...) __builtin_sve_svmulh_n_u32_m(__VA_ARGS__)
+#define svmulh_n_u64_m(...) __builtin_sve_svmulh_n_u64_m(__VA_ARGS__)
+#define svmulh_n_u16_m(...) __builtin_sve_svmulh_n_u16_m(__VA_ARGS__)
+#define svmulh_n_u8_x(...) __builtin_sve_svmulh_n_u8_x(__VA_ARGS__)
+#define svmulh_n_u32_x(...) __builtin_sve_svmulh_n_u32_x(__VA_ARGS__)
+#define svmulh_n_u64_x(...) __builtin_sve_svmulh_n_u64_x(__VA_ARGS__)
+#define svmulh_n_u16_x(...) __builtin_sve_svmulh_n_u16_x(__VA_ARGS__)
+#define svmulh_n_u8_z(...) __builtin_sve_svmulh_n_u8_z(__VA_ARGS__)
+#define svmulh_n_u32_z(...) __builtin_sve_svmulh_n_u32_z(__VA_ARGS__)
+#define svmulh_n_u64_z(...) __builtin_sve_svmulh_n_u64_z(__VA_ARGS__)
+#define svmulh_n_u16_z(...) __builtin_sve_svmulh_n_u16_z(__VA_ARGS__)
+#define svmulh_s8_m(...) __builtin_sve_svmulh_s8_m(__VA_ARGS__)
+#define svmulh_s32_m(...) __builtin_sve_svmulh_s32_m(__VA_ARGS__)
+#define svmulh_s64_m(...) __builtin_sve_svmulh_s64_m(__VA_ARGS__)
+#define svmulh_s16_m(...) __builtin_sve_svmulh_s16_m(__VA_ARGS__)
+#define svmulh_s8_x(...) __builtin_sve_svmulh_s8_x(__VA_ARGS__)
+#define svmulh_s32_x(...) __builtin_sve_svmulh_s32_x(__VA_ARGS__)
+#define svmulh_s64_x(...) __builtin_sve_svmulh_s64_x(__VA_ARGS__)
+#define svmulh_s16_x(...) __builtin_sve_svmulh_s16_x(__VA_ARGS__)
+#define svmulh_s8_z(...) __builtin_sve_svmulh_s8_z(__VA_ARGS__)
+#define svmulh_s32_z(...) __builtin_sve_svmulh_s32_z(__VA_ARGS__)
+#define svmulh_s64_z(...) __builtin_sve_svmulh_s64_z(__VA_ARGS__)
+#define svmulh_s16_z(...) __builtin_sve_svmulh_s16_z(__VA_ARGS__)
+#define svmulh_u8_m(...) __builtin_sve_svmulh_u8_m(__VA_ARGS__)
+#define svmulh_u32_m(...) __builtin_sve_svmulh_u32_m(__VA_ARGS__)
+#define svmulh_u64_m(...) __builtin_sve_svmulh_u64_m(__VA_ARGS__)
+#define svmulh_u16_m(...) __builtin_sve_svmulh_u16_m(__VA_ARGS__)
+#define svmulh_u8_x(...) __builtin_sve_svmulh_u8_x(__VA_ARGS__)
+#define svmulh_u32_x(...) __builtin_sve_svmulh_u32_x(__VA_ARGS__)
+#define svmulh_u64_x(...) __builtin_sve_svmulh_u64_x(__VA_ARGS__)
+#define svmulh_u16_x(...) __builtin_sve_svmulh_u16_x(__VA_ARGS__)
+#define svmulh_u8_z(...) __builtin_sve_svmulh_u8_z(__VA_ARGS__)
+#define svmulh_u32_z(...) __builtin_sve_svmulh_u32_z(__VA_ARGS__)
+#define svmulh_u64_z(...) __builtin_sve_svmulh_u64_z(__VA_ARGS__)
+#define svmulh_u16_z(...) __builtin_sve_svmulh_u16_z(__VA_ARGS__)
+#define svmulx_n_f64_m(...) __builtin_sve_svmulx_n_f64_m(__VA_ARGS__)
+#define svmulx_n_f32_m(...) __builtin_sve_svmulx_n_f32_m(__VA_ARGS__)
+#define svmulx_n_f16_m(...) __builtin_sve_svmulx_n_f16_m(__VA_ARGS__)
+#define svmulx_n_f64_x(...) __builtin_sve_svmulx_n_f64_x(__VA_ARGS__)
+#define svmulx_n_f32_x(...) __builtin_sve_svmulx_n_f32_x(__VA_ARGS__)
+#define svmulx_n_f16_x(...) __builtin_sve_svmulx_n_f16_x(__VA_ARGS__)
+#define svmulx_n_f64_z(...) __builtin_sve_svmulx_n_f64_z(__VA_ARGS__)
+#define svmulx_n_f32_z(...) __builtin_sve_svmulx_n_f32_z(__VA_ARGS__)
+#define svmulx_n_f16_z(...) __builtin_sve_svmulx_n_f16_z(__VA_ARGS__)
+#define svmulx_f64_m(...) __builtin_sve_svmulx_f64_m(__VA_ARGS__)
+#define svmulx_f32_m(...) __builtin_sve_svmulx_f32_m(__VA_ARGS__)
+#define svmulx_f16_m(...) __builtin_sve_svmulx_f16_m(__VA_ARGS__)
+#define svmulx_f64_x(...) __builtin_sve_svmulx_f64_x(__VA_ARGS__)
+#define svmulx_f32_x(...) __builtin_sve_svmulx_f32_x(__VA_ARGS__)
+#define svmulx_f16_x(...) __builtin_sve_svmulx_f16_x(__VA_ARGS__)
+#define svmulx_f64_z(...) __builtin_sve_svmulx_f64_z(__VA_ARGS__)
+#define svmulx_f32_z(...) __builtin_sve_svmulx_f32_z(__VA_ARGS__)
+#define svmulx_f16_z(...) __builtin_sve_svmulx_f16_z(__VA_ARGS__)
+#define svnand_b_z(...) __builtin_sve_svnand_b_z(__VA_ARGS__)
+#define svneg_f64_m(...) __builtin_sve_svneg_f64_m(__VA_ARGS__)
+#define svneg_f32_m(...) __builtin_sve_svneg_f32_m(__VA_ARGS__)
+#define svneg_f16_m(...) __builtin_sve_svneg_f16_m(__VA_ARGS__)
+#define svneg_f64_x(...) __builtin_sve_svneg_f64_x(__VA_ARGS__)
+#define svneg_f32_x(...) __builtin_sve_svneg_f32_x(__VA_ARGS__)
+#define svneg_f16_x(...) __builtin_sve_svneg_f16_x(__VA_ARGS__)
+#define svneg_f64_z(...) __builtin_sve_svneg_f64_z(__VA_ARGS__)
+#define svneg_f32_z(...) __builtin_sve_svneg_f32_z(__VA_ARGS__)
+#define svneg_f16_z(...) __builtin_sve_svneg_f16_z(__VA_ARGS__)
+#define svneg_s8_m(...) __builtin_sve_svneg_s8_m(__VA_ARGS__)
+#define svneg_s32_m(...) __builtin_sve_svneg_s32_m(__VA_ARGS__)
+#define svneg_s64_m(...) __builtin_sve_svneg_s64_m(__VA_ARGS__)
+#define svneg_s16_m(...) __builtin_sve_svneg_s16_m(__VA_ARGS__)
+#define svneg_s8_x(...) __builtin_sve_svneg_s8_x(__VA_ARGS__)
+#define svneg_s32_x(...) __builtin_sve_svneg_s32_x(__VA_ARGS__)
+#define svneg_s64_x(...) __builtin_sve_svneg_s64_x(__VA_ARGS__)
+#define svneg_s16_x(...) __builtin_sve_svneg_s16_x(__VA_ARGS__)
+#define svneg_s8_z(...) __builtin_sve_svneg_s8_z(__VA_ARGS__)
+#define svneg_s32_z(...) __builtin_sve_svneg_s32_z(__VA_ARGS__)
+#define svneg_s64_z(...) __builtin_sve_svneg_s64_z(__VA_ARGS__)
+#define svneg_s16_z(...) __builtin_sve_svneg_s16_z(__VA_ARGS__)
+#define svnmad_n_f64_m(...) __builtin_sve_svnmad_n_f64_m(__VA_ARGS__)
+#define svnmad_n_f32_m(...) __builtin_sve_svnmad_n_f32_m(__VA_ARGS__)
+#define svnmad_n_f16_m(...) __builtin_sve_svnmad_n_f16_m(__VA_ARGS__)
+#define svnmad_n_f64_x(...) __builtin_sve_svnmad_n_f64_x(__VA_ARGS__)
+#define svnmad_n_f32_x(...) __builtin_sve_svnmad_n_f32_x(__VA_ARGS__)
+#define svnmad_n_f16_x(...) __builtin_sve_svnmad_n_f16_x(__VA_ARGS__)
+#define svnmad_n_f64_z(...) __builtin_sve_svnmad_n_f64_z(__VA_ARGS__)
+#define svnmad_n_f32_z(...) __builtin_sve_svnmad_n_f32_z(__VA_ARGS__)
+#define svnmad_n_f16_z(...) __builtin_sve_svnmad_n_f16_z(__VA_ARGS__)
+#define svnmad_f64_m(...) __builtin_sve_svnmad_f64_m(__VA_ARGS__)
+#define svnmad_f32_m(...) __builtin_sve_svnmad_f32_m(__VA_ARGS__)
+#define svnmad_f16_m(...) __builtin_sve_svnmad_f16_m(__VA_ARGS__)
+#define svnmad_f64_x(...) __builtin_sve_svnmad_f64_x(__VA_ARGS__)
+#define svnmad_f32_x(...) __builtin_sve_svnmad_f32_x(__VA_ARGS__)
+#define svnmad_f16_x(...) __builtin_sve_svnmad_f16_x(__VA_ARGS__)
+#define svnmad_f64_z(...) __builtin_sve_svnmad_f64_z(__VA_ARGS__)
+#define svnmad_f32_z(...) __builtin_sve_svnmad_f32_z(__VA_ARGS__)
+#define svnmad_f16_z(...) __builtin_sve_svnmad_f16_z(__VA_ARGS__)
+#define svnmla_n_f64_m(...) __builtin_sve_svnmla_n_f64_m(__VA_ARGS__)
+#define svnmla_n_f32_m(...) __builtin_sve_svnmla_n_f32_m(__VA_ARGS__)
+#define svnmla_n_f16_m(...) __builtin_sve_svnmla_n_f16_m(__VA_ARGS__)
+#define svnmla_n_f64_x(...) __builtin_sve_svnmla_n_f64_x(__VA_ARGS__)
+#define svnmla_n_f32_x(...) __builtin_sve_svnmla_n_f32_x(__VA_ARGS__)
+#define svnmla_n_f16_x(...) __builtin_sve_svnmla_n_f16_x(__VA_ARGS__)
+#define svnmla_n_f64_z(...) __builtin_sve_svnmla_n_f64_z(__VA_ARGS__)
+#define svnmla_n_f32_z(...) __builtin_sve_svnmla_n_f32_z(__VA_ARGS__)
+#define svnmla_n_f16_z(...) __builtin_sve_svnmla_n_f16_z(__VA_ARGS__)
+#define svnmla_f64_m(...) __builtin_sve_svnmla_f64_m(__VA_ARGS__)
+#define svnmla_f32_m(...) __builtin_sve_svnmla_f32_m(__VA_ARGS__)
+#define svnmla_f16_m(...) __builtin_sve_svnmla_f16_m(__VA_ARGS__)
+#define svnmla_f64_x(...) __builtin_sve_svnmla_f64_x(__VA_ARGS__)
+#define svnmla_f32_x(...) __builtin_sve_svnmla_f32_x(__VA_ARGS__)
+#define svnmla_f16_x(...) __builtin_sve_svnmla_f16_x(__VA_ARGS__)
+#define svnmla_f64_z(...) __builtin_sve_svnmla_f64_z(__VA_ARGS__)
+#define svnmla_f32_z(...) __builtin_sve_svnmla_f32_z(__VA_ARGS__)
+#define svnmla_f16_z(...) __builtin_sve_svnmla_f16_z(__VA_ARGS__)
+#define svnmls_n_f64_m(...) __builtin_sve_svnmls_n_f64_m(__VA_ARGS__)
+#define svnmls_n_f32_m(...) __builtin_sve_svnmls_n_f32_m(__VA_ARGS__)
+#define svnmls_n_f16_m(...) __builtin_sve_svnmls_n_f16_m(__VA_ARGS__)
+#define svnmls_n_f64_x(...) __builtin_sve_svnmls_n_f64_x(__VA_ARGS__)
+#define svnmls_n_f32_x(...) __builtin_sve_svnmls_n_f32_x(__VA_ARGS__)
+#define svnmls_n_f16_x(...) __builtin_sve_svnmls_n_f16_x(__VA_ARGS__)
+#define svnmls_n_f64_z(...) __builtin_sve_svnmls_n_f64_z(__VA_ARGS__)
+#define svnmls_n_f32_z(...) __builtin_sve_svnmls_n_f32_z(__VA_ARGS__)
+#define svnmls_n_f16_z(...) __builtin_sve_svnmls_n_f16_z(__VA_ARGS__)
+#define svnmls_f64_m(...) __builtin_sve_svnmls_f64_m(__VA_ARGS__)
+#define svnmls_f32_m(...) __builtin_sve_svnmls_f32_m(__VA_ARGS__)
+#define svnmls_f16_m(...) __builtin_sve_svnmls_f16_m(__VA_ARGS__)
+#define svnmls_f64_x(...) __builtin_sve_svnmls_f64_x(__VA_ARGS__)
+#define svnmls_f32_x(...) __builtin_sve_svnmls_f32_x(__VA_ARGS__)
+#define svnmls_f16_x(...) __builtin_sve_svnmls_f16_x(__VA_ARGS__)
+#define svnmls_f64_z(...) __builtin_sve_svnmls_f64_z(__VA_ARGS__)
+#define svnmls_f32_z(...) __builtin_sve_svnmls_f32_z(__VA_ARGS__)
+#define svnmls_f16_z(...) __builtin_sve_svnmls_f16_z(__VA_ARGS__)
+#define svnmsb_n_f64_m(...) __builtin_sve_svnmsb_n_f64_m(__VA_ARGS__)
+#define svnmsb_n_f32_m(...) __builtin_sve_svnmsb_n_f32_m(__VA_ARGS__)
+#define svnmsb_n_f16_m(...) __builtin_sve_svnmsb_n_f16_m(__VA_ARGS__)
+#define svnmsb_n_f64_x(...) __builtin_sve_svnmsb_n_f64_x(__VA_ARGS__)
+#define svnmsb_n_f32_x(...) __builtin_sve_svnmsb_n_f32_x(__VA_ARGS__)
+#define svnmsb_n_f16_x(...) __builtin_sve_svnmsb_n_f16_x(__VA_ARGS__)
+#define svnmsb_n_f64_z(...) __builtin_sve_svnmsb_n_f64_z(__VA_ARGS__)
+#define svnmsb_n_f32_z(...) __builtin_sve_svnmsb_n_f32_z(__VA_ARGS__)
+#define svnmsb_n_f16_z(...) __builtin_sve_svnmsb_n_f16_z(__VA_ARGS__)
+#define svnmsb_f64_m(...) __builtin_sve_svnmsb_f64_m(__VA_ARGS__)
+#define svnmsb_f32_m(...) __builtin_sve_svnmsb_f32_m(__VA_ARGS__)
+#define svnmsb_f16_m(...) __builtin_sve_svnmsb_f16_m(__VA_ARGS__)
+#define svnmsb_f64_x(...) __builtin_sve_svnmsb_f64_x(__VA_ARGS__)
+#define svnmsb_f32_x(...) __builtin_sve_svnmsb_f32_x(__VA_ARGS__)
+#define svnmsb_f16_x(...) __builtin_sve_svnmsb_f16_x(__VA_ARGS__)
+#define svnmsb_f64_z(...) __builtin_sve_svnmsb_f64_z(__VA_ARGS__)
+#define svnmsb_f32_z(...) __builtin_sve_svnmsb_f32_z(__VA_ARGS__)
+#define svnmsb_f16_z(...) __builtin_sve_svnmsb_f16_z(__VA_ARGS__)
+#define svnor_b_z(...) __builtin_sve_svnor_b_z(__VA_ARGS__)
+#define svnot_b_z(...) __builtin_sve_svnot_b_z(__VA_ARGS__)
+#define svnot_u8_m(...) __builtin_sve_svnot_u8_m(__VA_ARGS__)
+#define svnot_u32_m(...) __builtin_sve_svnot_u32_m(__VA_ARGS__)
+#define svnot_u64_m(...) __builtin_sve_svnot_u64_m(__VA_ARGS__)
+#define svnot_u16_m(...) __builtin_sve_svnot_u16_m(__VA_ARGS__)
+#define svnot_s8_m(...) __builtin_sve_svnot_s8_m(__VA_ARGS__)
+#define svnot_s32_m(...) __builtin_sve_svnot_s32_m(__VA_ARGS__)
+#define svnot_s64_m(...) __builtin_sve_svnot_s64_m(__VA_ARGS__)
+#define svnot_s16_m(...) __builtin_sve_svnot_s16_m(__VA_ARGS__)
+#define svnot_u8_x(...) __builtin_sve_svnot_u8_x(__VA_ARGS__)
+#define svnot_u32_x(...) __builtin_sve_svnot_u32_x(__VA_ARGS__)
+#define svnot_u64_x(...) __builtin_sve_svnot_u64_x(__VA_ARGS__)
+#define svnot_u16_x(...) __builtin_sve_svnot_u16_x(__VA_ARGS__)
+#define svnot_s8_x(...) __builtin_sve_svnot_s8_x(__VA_ARGS__)
+#define svnot_s32_x(...) __builtin_sve_svnot_s32_x(__VA_ARGS__)
+#define svnot_s64_x(...) __builtin_sve_svnot_s64_x(__VA_ARGS__)
+#define svnot_s16_x(...) __builtin_sve_svnot_s16_x(__VA_ARGS__)
+#define svnot_u8_z(...) __builtin_sve_svnot_u8_z(__VA_ARGS__)
+#define svnot_u32_z(...) __builtin_sve_svnot_u32_z(__VA_ARGS__)
+#define svnot_u64_z(...) __builtin_sve_svnot_u64_z(__VA_ARGS__)
+#define svnot_u16_z(...) __builtin_sve_svnot_u16_z(__VA_ARGS__)
+#define svnot_s8_z(...) __builtin_sve_svnot_s8_z(__VA_ARGS__)
+#define svnot_s32_z(...) __builtin_sve_svnot_s32_z(__VA_ARGS__)
+#define svnot_s64_z(...) __builtin_sve_svnot_s64_z(__VA_ARGS__)
+#define svnot_s16_z(...) __builtin_sve_svnot_s16_z(__VA_ARGS__)
+#define svorn_b_z(...) __builtin_sve_svorn_b_z(__VA_ARGS__)
+#define svorr_b_z(...) __builtin_sve_svorr_b_z(__VA_ARGS__)
+#define svorr_n_u8_m(...) __builtin_sve_svorr_n_u8_m(__VA_ARGS__)
+#define svorr_n_u32_m(...) __builtin_sve_svorr_n_u32_m(__VA_ARGS__)
+#define svorr_n_u64_m(...) __builtin_sve_svorr_n_u64_m(__VA_ARGS__)
+#define svorr_n_u16_m(...) __builtin_sve_svorr_n_u16_m(__VA_ARGS__)
+#define svorr_n_s8_m(...) __builtin_sve_svorr_n_s8_m(__VA_ARGS__)
+#define svorr_n_s32_m(...) __builtin_sve_svorr_n_s32_m(__VA_ARGS__)
+#define svorr_n_s64_m(...) __builtin_sve_svorr_n_s64_m(__VA_ARGS__)
+#define svorr_n_s16_m(...) __builtin_sve_svorr_n_s16_m(__VA_ARGS__)
+#define svorr_n_u8_x(...) __builtin_sve_svorr_n_u8_x(__VA_ARGS__)
+#define svorr_n_u32_x(...) __builtin_sve_svorr_n_u32_x(__VA_ARGS__)
+#define svorr_n_u64_x(...) __builtin_sve_svorr_n_u64_x(__VA_ARGS__)
+#define svorr_n_u16_x(...) __builtin_sve_svorr_n_u16_x(__VA_ARGS__)
+#define svorr_n_s8_x(...) __builtin_sve_svorr_n_s8_x(__VA_ARGS__)
+#define svorr_n_s32_x(...) __builtin_sve_svorr_n_s32_x(__VA_ARGS__)
+#define svorr_n_s64_x(...) __builtin_sve_svorr_n_s64_x(__VA_ARGS__)
+#define svorr_n_s16_x(...) __builtin_sve_svorr_n_s16_x(__VA_ARGS__)
+#define svorr_n_u8_z(...) __builtin_sve_svorr_n_u8_z(__VA_ARGS__)
+#define svorr_n_u32_z(...) __builtin_sve_svorr_n_u32_z(__VA_ARGS__)
+#define svorr_n_u64_z(...) __builtin_sve_svorr_n_u64_z(__VA_ARGS__)
+#define svorr_n_u16_z(...) __builtin_sve_svorr_n_u16_z(__VA_ARGS__)
+#define svorr_n_s8_z(...) __builtin_sve_svorr_n_s8_z(__VA_ARGS__)
+#define svorr_n_s32_z(...) __builtin_sve_svorr_n_s32_z(__VA_ARGS__)
+#define svorr_n_s64_z(...) __builtin_sve_svorr_n_s64_z(__VA_ARGS__)
+#define svorr_n_s16_z(...) __builtin_sve_svorr_n_s16_z(__VA_ARGS__)
+#define svorr_u8_m(...) __builtin_sve_svorr_u8_m(__VA_ARGS__)
+#define svorr_u32_m(...) __builtin_sve_svorr_u32_m(__VA_ARGS__)
+#define svorr_u64_m(...) __builtin_sve_svorr_u64_m(__VA_ARGS__)
+#define svorr_u16_m(...) __builtin_sve_svorr_u16_m(__VA_ARGS__)
+#define svorr_s8_m(...) __builtin_sve_svorr_s8_m(__VA_ARGS__)
+#define svorr_s32_m(...) __builtin_sve_svorr_s32_m(__VA_ARGS__)
+#define svorr_s64_m(...) __builtin_sve_svorr_s64_m(__VA_ARGS__)
+#define svorr_s16_m(...) __builtin_sve_svorr_s16_m(__VA_ARGS__)
+#define svorr_u8_x(...) __builtin_sve_svorr_u8_x(__VA_ARGS__)
+#define svorr_u32_x(...) __builtin_sve_svorr_u32_x(__VA_ARGS__)
+#define svorr_u64_x(...) __builtin_sve_svorr_u64_x(__VA_ARGS__)
+#define svorr_u16_x(...) __builtin_sve_svorr_u16_x(__VA_ARGS__)
+#define svorr_s8_x(...) __builtin_sve_svorr_s8_x(__VA_ARGS__)
+#define svorr_s32_x(...) __builtin_sve_svorr_s32_x(__VA_ARGS__)
+#define svorr_s64_x(...) __builtin_sve_svorr_s64_x(__VA_ARGS__)
+#define svorr_s16_x(...) __builtin_sve_svorr_s16_x(__VA_ARGS__)
+#define svorr_u8_z(...) __builtin_sve_svorr_u8_z(__VA_ARGS__)
+#define svorr_u32_z(...) __builtin_sve_svorr_u32_z(__VA_ARGS__)
+#define svorr_u64_z(...) __builtin_sve_svorr_u64_z(__VA_ARGS__)
+#define svorr_u16_z(...) __builtin_sve_svorr_u16_z(__VA_ARGS__)
+#define svorr_s8_z(...) __builtin_sve_svorr_s8_z(__VA_ARGS__)
+#define svorr_s32_z(...) __builtin_sve_svorr_s32_z(__VA_ARGS__)
+#define svorr_s64_z(...) __builtin_sve_svorr_s64_z(__VA_ARGS__)
+#define svorr_s16_z(...) __builtin_sve_svorr_s16_z(__VA_ARGS__)
+#define svorv_u8(...) __builtin_sve_svorv_u8(__VA_ARGS__)
+#define svorv_u32(...) __builtin_sve_svorv_u32(__VA_ARGS__)
+#define svorv_u64(...) __builtin_sve_svorv_u64(__VA_ARGS__)
+#define svorv_u16(...) __builtin_sve_svorv_u16(__VA_ARGS__)
+#define svorv_s8(...) __builtin_sve_svorv_s8(__VA_ARGS__)
+#define svorv_s32(...) __builtin_sve_svorv_s32(__VA_ARGS__)
+#define svorv_s64(...) __builtin_sve_svorv_s64(__VA_ARGS__)
+#define svorv_s16(...) __builtin_sve_svorv_s16(__VA_ARGS__)
+#define svpfalse_b(...) __builtin_sve_svpfalse_b(__VA_ARGS__)
+#define svpfirst_b(...) __builtin_sve_svpfirst_b(__VA_ARGS__)
+#define svpnext_b8(...) __builtin_sve_svpnext_b8(__VA_ARGS__)
+#define svpnext_b32(...) __builtin_sve_svpnext_b32(__VA_ARGS__)
+#define svpnext_b64(...) __builtin_sve_svpnext_b64(__VA_ARGS__)
+#define svpnext_b16(...) __builtin_sve_svpnext_b16(__VA_ARGS__)
+#define svprfb(...) __builtin_sve_svprfb(__VA_ARGS__)
+#define svprfb_gather_u32base(...) __builtin_sve_svprfb_gather_u32base(__VA_ARGS__)
+#define svprfb_gather_u64base(...) __builtin_sve_svprfb_gather_u64base(__VA_ARGS__)
+#define svprfb_gather_u32base_offset(...) __builtin_sve_svprfb_gather_u32base_offset(__VA_ARGS__)
+#define svprfb_gather_u64base_offset(...) __builtin_sve_svprfb_gather_u64base_offset(__VA_ARGS__)
+#define svprfb_gather_s32offset(...) __builtin_sve_svprfb_gather_s32offset(__VA_ARGS__)
+#define svprfb_gather_u32offset(...) __builtin_sve_svprfb_gather_u32offset(__VA_ARGS__)
+#define svprfb_gather_s64offset(...) __builtin_sve_svprfb_gather_s64offset(__VA_ARGS__)
+#define svprfb_gather_u64offset(...) __builtin_sve_svprfb_gather_u64offset(__VA_ARGS__)
+#define svprfb_vnum(...) __builtin_sve_svprfb_vnum(__VA_ARGS__)
+#define svprfd(...) __builtin_sve_svprfd(__VA_ARGS__)
+#define svprfd_gather_u32base(...) __builtin_sve_svprfd_gather_u32base(__VA_ARGS__)
+#define svprfd_gather_u64base(...) __builtin_sve_svprfd_gather_u64base(__VA_ARGS__)
+#define svprfd_gather_u32base_index(...) __builtin_sve_svprfd_gather_u32base_index(__VA_ARGS__)
+#define svprfd_gather_u64base_index(...) __builtin_sve_svprfd_gather_u64base_index(__VA_ARGS__)
+#define svprfd_gather_s32index(...) __builtin_sve_svprfd_gather_s32index(__VA_ARGS__)
+#define svprfd_gather_u32index(...) __builtin_sve_svprfd_gather_u32index(__VA_ARGS__)
+#define svprfd_gather_s64index(...) __builtin_sve_svprfd_gather_s64index(__VA_ARGS__)
+#define svprfd_gather_u64index(...) __builtin_sve_svprfd_gather_u64index(__VA_ARGS__)
+#define svprfd_vnum(...) __builtin_sve_svprfd_vnum(__VA_ARGS__)
+#define svprfh(...) __builtin_sve_svprfh(__VA_ARGS__)
+#define svprfh_gather_u32base(...) __builtin_sve_svprfh_gather_u32base(__VA_ARGS__)
+#define svprfh_gather_u64base(...) __builtin_sve_svprfh_gather_u64base(__VA_ARGS__)
+#define svprfh_gather_u32base_index(...) __builtin_sve_svprfh_gather_u32base_index(__VA_ARGS__)
+#define svprfh_gather_u64base_index(...) __builtin_sve_svprfh_gather_u64base_index(__VA_ARGS__)
+#define svprfh_gather_s32index(...) __builtin_sve_svprfh_gather_s32index(__VA_ARGS__)
+#define svprfh_gather_u32index(...) __builtin_sve_svprfh_gather_u32index(__VA_ARGS__)
+#define svprfh_gather_s64index(...) __builtin_sve_svprfh_gather_s64index(__VA_ARGS__)
+#define svprfh_gather_u64index(...) __builtin_sve_svprfh_gather_u64index(__VA_ARGS__)
+#define svprfh_vnum(...) __builtin_sve_svprfh_vnum(__VA_ARGS__)
+#define svprfw(...) __builtin_sve_svprfw(__VA_ARGS__)
+#define svprfw_gather_u32base(...) __builtin_sve_svprfw_gather_u32base(__VA_ARGS__)
+#define svprfw_gather_u64base(...) __builtin_sve_svprfw_gather_u64base(__VA_ARGS__)
+#define svprfw_gather_u32base_index(...) __builtin_sve_svprfw_gather_u32base_index(__VA_ARGS__)
+#define svprfw_gather_u64base_index(...) __builtin_sve_svprfw_gather_u64base_index(__VA_ARGS__)
+#define svprfw_gather_s32index(...) __builtin_sve_svprfw_gather_s32index(__VA_ARGS__)
+#define svprfw_gather_u32index(...) __builtin_sve_svprfw_gather_u32index(__VA_ARGS__)
+#define svprfw_gather_s64index(...) __builtin_sve_svprfw_gather_s64index(__VA_ARGS__)
+#define svprfw_gather_u64index(...) __builtin_sve_svprfw_gather_u64index(__VA_ARGS__)
+#define svprfw_vnum(...) __builtin_sve_svprfw_vnum(__VA_ARGS__)
+#define svptest_any(...) __builtin_sve_svptest_any(__VA_ARGS__)
+#define svptest_first(...) __builtin_sve_svptest_first(__VA_ARGS__)
+#define svptest_last(...) __builtin_sve_svptest_last(__VA_ARGS__)
+#define svptrue_pat_b8(...) __builtin_sve_svptrue_pat_b8(__VA_ARGS__)
+#define svptrue_pat_b32(...) __builtin_sve_svptrue_pat_b32(__VA_ARGS__)
+#define svptrue_pat_b64(...) __builtin_sve_svptrue_pat_b64(__VA_ARGS__)
+#define svptrue_pat_b16(...) __builtin_sve_svptrue_pat_b16(__VA_ARGS__)
+#define svptrue_b8(...) __builtin_sve_svptrue_b8(__VA_ARGS__)
+#define svptrue_b32(...) __builtin_sve_svptrue_b32(__VA_ARGS__)
+#define svptrue_b64(...) __builtin_sve_svptrue_b64(__VA_ARGS__)
+#define svptrue_b16(...) __builtin_sve_svptrue_b16(__VA_ARGS__)
+#define svqadd_n_s8(...) __builtin_sve_svqadd_n_s8(__VA_ARGS__)
+#define svqadd_n_s32(...) __builtin_sve_svqadd_n_s32(__VA_ARGS__)
+#define svqadd_n_s64(...) __builtin_sve_svqadd_n_s64(__VA_ARGS__)
+#define svqadd_n_s16(...) __builtin_sve_svqadd_n_s16(__VA_ARGS__)
+#define svqadd_n_u8(...) __builtin_sve_svqadd_n_u8(__VA_ARGS__)
+#define svqadd_n_u32(...) __builtin_sve_svqadd_n_u32(__VA_ARGS__)
+#define svqadd_n_u64(...) __builtin_sve_svqadd_n_u64(__VA_ARGS__)
+#define svqadd_n_u16(...) __builtin_sve_svqadd_n_u16(__VA_ARGS__)
+#define svqadd_s8(...) __builtin_sve_svqadd_s8(__VA_ARGS__)
+#define svqadd_s32(...) __builtin_sve_svqadd_s32(__VA_ARGS__)
+#define svqadd_s64(...) __builtin_sve_svqadd_s64(__VA_ARGS__)
+#define svqadd_s16(...) __builtin_sve_svqadd_s16(__VA_ARGS__)
+#define svqadd_u8(...) __builtin_sve_svqadd_u8(__VA_ARGS__)
+#define svqadd_u32(...) __builtin_sve_svqadd_u32(__VA_ARGS__)
+#define svqadd_u64(...) __builtin_sve_svqadd_u64(__VA_ARGS__)
+#define svqadd_u16(...) __builtin_sve_svqadd_u16(__VA_ARGS__)
+#define svqdecb_n_s32(...) __builtin_sve_svqdecb_n_s32(__VA_ARGS__)
+#define svqdecb_n_s64(...) __builtin_sve_svqdecb_n_s64(__VA_ARGS__)
+#define svqdecb_n_u32(...) __builtin_sve_svqdecb_n_u32(__VA_ARGS__)
+#define svqdecb_n_u64(...) __builtin_sve_svqdecb_n_u64(__VA_ARGS__)
+#define svqdecb_pat_n_s32(...) __builtin_sve_svqdecb_pat_n_s32(__VA_ARGS__)
+#define svqdecb_pat_n_s64(...) __builtin_sve_svqdecb_pat_n_s64(__VA_ARGS__)
+#define svqdecb_pat_n_u32(...) __builtin_sve_svqdecb_pat_n_u32(__VA_ARGS__)
+#define svqdecb_pat_n_u64(...) __builtin_sve_svqdecb_pat_n_u64(__VA_ARGS__)
+#define svqdecd_n_s32(...) __builtin_sve_svqdecd_n_s32(__VA_ARGS__)
+#define svqdecd_n_s64(...) __builtin_sve_svqdecd_n_s64(__VA_ARGS__)
+#define svqdecd_n_u32(...) __builtin_sve_svqdecd_n_u32(__VA_ARGS__)
+#define svqdecd_n_u64(...) __builtin_sve_svqdecd_n_u64(__VA_ARGS__)
+#define svqdecd_s64(...) __builtin_sve_svqdecd_s64(__VA_ARGS__)
+#define svqdecd_u64(...) __builtin_sve_svqdecd_u64(__VA_ARGS__)
+#define svqdecd_pat_n_s32(...) __builtin_sve_svqdecd_pat_n_s32(__VA_ARGS__)
+#define svqdecd_pat_n_s64(...) __builtin_sve_svqdecd_pat_n_s64(__VA_ARGS__)
+#define svqdecd_pat_n_u32(...) __builtin_sve_svqdecd_pat_n_u32(__VA_ARGS__)
+#define svqdecd_pat_n_u64(...) __builtin_sve_svqdecd_pat_n_u64(__VA_ARGS__)
+#define svqdecd_pat_s64(...) __builtin_sve_svqdecd_pat_s64(__VA_ARGS__)
+#define svqdecd_pat_u64(...) __builtin_sve_svqdecd_pat_u64(__VA_ARGS__)
+#define svqdech_n_s32(...) __builtin_sve_svqdech_n_s32(__VA_ARGS__)
+#define svqdech_n_s64(...) __builtin_sve_svqdech_n_s64(__VA_ARGS__)
+#define svqdech_n_u32(...) __builtin_sve_svqdech_n_u32(__VA_ARGS__)
+#define svqdech_n_u64(...) __builtin_sve_svqdech_n_u64(__VA_ARGS__)
+#define svqdech_s16(...) __builtin_sve_svqdech_s16(__VA_ARGS__)
+#define svqdech_u16(...) __builtin_sve_svqdech_u16(__VA_ARGS__)
+#define svqdech_pat_n_s32(...) __builtin_sve_svqdech_pat_n_s32(__VA_ARGS__)
+#define svqdech_pat_n_s64(...) __builtin_sve_svqdech_pat_n_s64(__VA_ARGS__)
+#define svqdech_pat_n_u32(...) __builtin_sve_svqdech_pat_n_u32(__VA_ARGS__)
+#define svqdech_pat_n_u64(...) __builtin_sve_svqdech_pat_n_u64(__VA_ARGS__)
+#define svqdech_pat_s16(...) __builtin_sve_svqdech_pat_s16(__VA_ARGS__)
+#define svqdech_pat_u16(...) __builtin_sve_svqdech_pat_u16(__VA_ARGS__)
+#define svqdecp_n_s32_b8(...) __builtin_sve_svqdecp_n_s32_b8(__VA_ARGS__)
+#define svqdecp_n_s32_b32(...) __builtin_sve_svqdecp_n_s32_b32(__VA_ARGS__)
+#define svqdecp_n_s32_b64(...) __builtin_sve_svqdecp_n_s32_b64(__VA_ARGS__)
+#define svqdecp_n_s32_b16(...) __builtin_sve_svqdecp_n_s32_b16(__VA_ARGS__)
+#define svqdecp_n_s64_b8(...) __builtin_sve_svqdecp_n_s64_b8(__VA_ARGS__)
+#define svqdecp_n_s64_b32(...) __builtin_sve_svqdecp_n_s64_b32(__VA_ARGS__)
+#define svqdecp_n_s64_b64(...) __builtin_sve_svqdecp_n_s64_b64(__VA_ARGS__)
+#define svqdecp_n_s64_b16(...) __builtin_sve_svqdecp_n_s64_b16(__VA_ARGS__)
+#define svqdecp_n_u32_b8(...) __builtin_sve_svqdecp_n_u32_b8(__VA_ARGS__)
+#define svqdecp_n_u32_b32(...) __builtin_sve_svqdecp_n_u32_b32(__VA_ARGS__)
+#define svqdecp_n_u32_b64(...) __builtin_sve_svqdecp_n_u32_b64(__VA_ARGS__)
+#define svqdecp_n_u32_b16(...) __builtin_sve_svqdecp_n_u32_b16(__VA_ARGS__)
+#define svqdecp_n_u64_b8(...) __builtin_sve_svqdecp_n_u64_b8(__VA_ARGS__)
+#define svqdecp_n_u64_b32(...) __builtin_sve_svqdecp_n_u64_b32(__VA_ARGS__)
+#define svqdecp_n_u64_b64(...) __builtin_sve_svqdecp_n_u64_b64(__VA_ARGS__)
+#define svqdecp_n_u64_b16(...) __builtin_sve_svqdecp_n_u64_b16(__VA_ARGS__)
+#define svqdecp_s32(...) __builtin_sve_svqdecp_s32(__VA_ARGS__)
+#define svqdecp_s64(...) __builtin_sve_svqdecp_s64(__VA_ARGS__)
+#define svqdecp_s16(...) __builtin_sve_svqdecp_s16(__VA_ARGS__)
+#define svqdecp_u32(...) __builtin_sve_svqdecp_u32(__VA_ARGS__)
+#define svqdecp_u64(...) __builtin_sve_svqdecp_u64(__VA_ARGS__)
+#define svqdecp_u16(...) __builtin_sve_svqdecp_u16(__VA_ARGS__)
+#define svqdecw_n_s32(...) __builtin_sve_svqdecw_n_s32(__VA_ARGS__)
+#define svqdecw_n_s64(...) __builtin_sve_svqdecw_n_s64(__VA_ARGS__)
+#define svqdecw_n_u32(...) __builtin_sve_svqdecw_n_u32(__VA_ARGS__)
+#define svqdecw_n_u64(...) __builtin_sve_svqdecw_n_u64(__VA_ARGS__)
+#define svqdecw_s32(...) __builtin_sve_svqdecw_s32(__VA_ARGS__)
+#define svqdecw_u32(...) __builtin_sve_svqdecw_u32(__VA_ARGS__)
+#define svqdecw_pat_n_s32(...) __builtin_sve_svqdecw_pat_n_s32(__VA_ARGS__)
+#define svqdecw_pat_n_s64(...) __builtin_sve_svqdecw_pat_n_s64(__VA_ARGS__)
+#define svqdecw_pat_n_u32(...) __builtin_sve_svqdecw_pat_n_u32(__VA_ARGS__)
+#define svqdecw_pat_n_u64(...) __builtin_sve_svqdecw_pat_n_u64(__VA_ARGS__)
+#define svqdecw_pat_s32(...) __builtin_sve_svqdecw_pat_s32(__VA_ARGS__)
+#define svqdecw_pat_u32(...) __builtin_sve_svqdecw_pat_u32(__VA_ARGS__)
+#define svqincb_n_s32(...) __builtin_sve_svqincb_n_s32(__VA_ARGS__)
+#define svqincb_n_s64(...) __builtin_sve_svqincb_n_s64(__VA_ARGS__)
+#define svqincb_n_u32(...) __builtin_sve_svqincb_n_u32(__VA_ARGS__)
+#define svqincb_n_u64(...) __builtin_sve_svqincb_n_u64(__VA_ARGS__)
+#define svqincb_pat_n_s32(...) __builtin_sve_svqincb_pat_n_s32(__VA_ARGS__)
+#define svqincb_pat_n_s64(...) __builtin_sve_svqincb_pat_n_s64(__VA_ARGS__)
+#define svqincb_pat_n_u32(...) __builtin_sve_svqincb_pat_n_u32(__VA_ARGS__)
+#define svqincb_pat_n_u64(...) __builtin_sve_svqincb_pat_n_u64(__VA_ARGS__)
+#define svqincd_n_s32(...) __builtin_sve_svqincd_n_s32(__VA_ARGS__)
+#define svqincd_n_s64(...) __builtin_sve_svqincd_n_s64(__VA_ARGS__)
+#define svqincd_n_u32(...) __builtin_sve_svqincd_n_u32(__VA_ARGS__)
+#define svqincd_n_u64(...) __builtin_sve_svqincd_n_u64(__VA_ARGS__)
+#define svqincd_s64(...) __builtin_sve_svqincd_s64(__VA_ARGS__)
+#define svqincd_u64(...) __builtin_sve_svqincd_u64(__VA_ARGS__)
+#define svqincd_pat_n_s32(...) __builtin_sve_svqincd_pat_n_s32(__VA_ARGS__)
+#define svqincd_pat_n_s64(...) __builtin_sve_svqincd_pat_n_s64(__VA_ARGS__)
+#define svqincd_pat_n_u32(...) __builtin_sve_svqincd_pat_n_u32(__VA_ARGS__)
+#define svqincd_pat_n_u64(...) __builtin_sve_svqincd_pat_n_u64(__VA_ARGS__)
+#define svqincd_pat_s64(...) __builtin_sve_svqincd_pat_s64(__VA_ARGS__)
+#define svqincd_pat_u64(...) __builtin_sve_svqincd_pat_u64(__VA_ARGS__)
+#define svqinch_n_s32(...) __builtin_sve_svqinch_n_s32(__VA_ARGS__)
+#define svqinch_n_s64(...) __builtin_sve_svqinch_n_s64(__VA_ARGS__)
+#define svqinch_n_u32(...) __builtin_sve_svqinch_n_u32(__VA_ARGS__)
+#define svqinch_n_u64(...) __builtin_sve_svqinch_n_u64(__VA_ARGS__)
+#define svqinch_s16(...) __builtin_sve_svqinch_s16(__VA_ARGS__)
+#define svqinch_u16(...) __builtin_sve_svqinch_u16(__VA_ARGS__)
+#define svqinch_pat_n_s32(...) __builtin_sve_svqinch_pat_n_s32(__VA_ARGS__)
+#define svqinch_pat_n_s64(...) __builtin_sve_svqinch_pat_n_s64(__VA_ARGS__)
+#define svqinch_pat_n_u32(...) __builtin_sve_svqinch_pat_n_u32(__VA_ARGS__)
+#define svqinch_pat_n_u64(...) __builtin_sve_svqinch_pat_n_u64(__VA_ARGS__)
+#define svqinch_pat_s16(...) __builtin_sve_svqinch_pat_s16(__VA_ARGS__)
+#define svqinch_pat_u16(...) __builtin_sve_svqinch_pat_u16(__VA_ARGS__)
+#define svqincp_n_s32_b8(...) __builtin_sve_svqincp_n_s32_b8(__VA_ARGS__)
+#define svqincp_n_s32_b32(...) __builtin_sve_svqincp_n_s32_b32(__VA_ARGS__)
+#define svqincp_n_s32_b64(...) __builtin_sve_svqincp_n_s32_b64(__VA_ARGS__)
+#define svqincp_n_s32_b16(...) __builtin_sve_svqincp_n_s32_b16(__VA_ARGS__)
+#define svqincp_n_s64_b8(...) __builtin_sve_svqincp_n_s64_b8(__VA_ARGS__)
+#define svqincp_n_s64_b32(...) __builtin_sve_svqincp_n_s64_b32(__VA_ARGS__)
+#define svqincp_n_s64_b64(...) __builtin_sve_svqincp_n_s64_b64(__VA_ARGS__)
+#define svqincp_n_s64_b16(...) __builtin_sve_svqincp_n_s64_b16(__VA_ARGS__)
+#define svqincp_n_u32_b8(...) __builtin_sve_svqincp_n_u32_b8(__VA_ARGS__)
+#define svqincp_n_u32_b32(...) __builtin_sve_svqincp_n_u32_b32(__VA_ARGS__)
+#define svqincp_n_u32_b64(...) __builtin_sve_svqincp_n_u32_b64(__VA_ARGS__)
+#define svqincp_n_u32_b16(...) __builtin_sve_svqincp_n_u32_b16(__VA_ARGS__)
+#define svqincp_n_u64_b8(...) __builtin_sve_svqincp_n_u64_b8(__VA_ARGS__)
+#define svqincp_n_u64_b32(...) __builtin_sve_svqincp_n_u64_b32(__VA_ARGS__)
+#define svqincp_n_u64_b64(...) __builtin_sve_svqincp_n_u64_b64(__VA_ARGS__)
+#define svqincp_n_u64_b16(...) __builtin_sve_svqincp_n_u64_b16(__VA_ARGS__)
+#define svqincp_s32(...) __builtin_sve_svqincp_s32(__VA_ARGS__)
+#define svqincp_s64(...) __builtin_sve_svqincp_s64(__VA_ARGS__)
+#define svqincp_s16(...) __builtin_sve_svqincp_s16(__VA_ARGS__)
+#define svqincp_u32(...) __builtin_sve_svqincp_u32(__VA_ARGS__)
+#define svqincp_u64(...) __builtin_sve_svqincp_u64(__VA_ARGS__)
+#define svqincp_u16(...) __builtin_sve_svqincp_u16(__VA_ARGS__)
+#define svqincw_n_s32(...) __builtin_sve_svqincw_n_s32(__VA_ARGS__)
+#define svqincw_n_s64(...) __builtin_sve_svqincw_n_s64(__VA_ARGS__)
+#define svqincw_n_u32(...) __builtin_sve_svqincw_n_u32(__VA_ARGS__)
+#define svqincw_n_u64(...) __builtin_sve_svqincw_n_u64(__VA_ARGS__)
+#define svqincw_s32(...) __builtin_sve_svqincw_s32(__VA_ARGS__)
+#define svqincw_u32(...) __builtin_sve_svqincw_u32(__VA_ARGS__)
+#define svqincw_pat_n_s32(...) __builtin_sve_svqincw_pat_n_s32(__VA_ARGS__)
+#define svqincw_pat_n_s64(...) __builtin_sve_svqincw_pat_n_s64(__VA_ARGS__)
+#define svqincw_pat_n_u32(...) __builtin_sve_svqincw_pat_n_u32(__VA_ARGS__)
+#define svqincw_pat_n_u64(...) __builtin_sve_svqincw_pat_n_u64(__VA_ARGS__)
+#define svqincw_pat_s32(...) __builtin_sve_svqincw_pat_s32(__VA_ARGS__)
+#define svqincw_pat_u32(...) __builtin_sve_svqincw_pat_u32(__VA_ARGS__)
+#define svqsub_n_s8(...) __builtin_sve_svqsub_n_s8(__VA_ARGS__)
+#define svqsub_n_s32(...) __builtin_sve_svqsub_n_s32(__VA_ARGS__)
+#define svqsub_n_s64(...) __builtin_sve_svqsub_n_s64(__VA_ARGS__)
+#define svqsub_n_s16(...) __builtin_sve_svqsub_n_s16(__VA_ARGS__)
+#define svqsub_n_u8(...) __builtin_sve_svqsub_n_u8(__VA_ARGS__)
+#define svqsub_n_u32(...) __builtin_sve_svqsub_n_u32(__VA_ARGS__)
+#define svqsub_n_u64(...) __builtin_sve_svqsub_n_u64(__VA_ARGS__)
+#define svqsub_n_u16(...) __builtin_sve_svqsub_n_u16(__VA_ARGS__)
+#define svqsub_s8(...) __builtin_sve_svqsub_s8(__VA_ARGS__)
+#define svqsub_s32(...) __builtin_sve_svqsub_s32(__VA_ARGS__)
+#define svqsub_s64(...) __builtin_sve_svqsub_s64(__VA_ARGS__)
+#define svqsub_s16(...) __builtin_sve_svqsub_s16(__VA_ARGS__)
+#define svqsub_u8(...) __builtin_sve_svqsub_u8(__VA_ARGS__)
+#define svqsub_u32(...) __builtin_sve_svqsub_u32(__VA_ARGS__)
+#define svqsub_u64(...) __builtin_sve_svqsub_u64(__VA_ARGS__)
+#define svqsub_u16(...) __builtin_sve_svqsub_u16(__VA_ARGS__)
+#define svrbit_u8_m(...) __builtin_sve_svrbit_u8_m(__VA_ARGS__)
+#define svrbit_u32_m(...) __builtin_sve_svrbit_u32_m(__VA_ARGS__)
+#define svrbit_u64_m(...) __builtin_sve_svrbit_u64_m(__VA_ARGS__)
+#define svrbit_u16_m(...) __builtin_sve_svrbit_u16_m(__VA_ARGS__)
+#define svrbit_s8_m(...) __builtin_sve_svrbit_s8_m(__VA_ARGS__)
+#define svrbit_s32_m(...) __builtin_sve_svrbit_s32_m(__VA_ARGS__)
+#define svrbit_s64_m(...) __builtin_sve_svrbit_s64_m(__VA_ARGS__)
+#define svrbit_s16_m(...) __builtin_sve_svrbit_s16_m(__VA_ARGS__)
+#define svrbit_u8_x(...) __builtin_sve_svrbit_u8_x(__VA_ARGS__)
+#define svrbit_u32_x(...) __builtin_sve_svrbit_u32_x(__VA_ARGS__)
+#define svrbit_u64_x(...) __builtin_sve_svrbit_u64_x(__VA_ARGS__)
+#define svrbit_u16_x(...) __builtin_sve_svrbit_u16_x(__VA_ARGS__)
+#define svrbit_s8_x(...) __builtin_sve_svrbit_s8_x(__VA_ARGS__)
+#define svrbit_s32_x(...) __builtin_sve_svrbit_s32_x(__VA_ARGS__)
+#define svrbit_s64_x(...) __builtin_sve_svrbit_s64_x(__VA_ARGS__)
+#define svrbit_s16_x(...) __builtin_sve_svrbit_s16_x(__VA_ARGS__)
+#define svrbit_u8_z(...) __builtin_sve_svrbit_u8_z(__VA_ARGS__)
+#define svrbit_u32_z(...) __builtin_sve_svrbit_u32_z(__VA_ARGS__)
+#define svrbit_u64_z(...) __builtin_sve_svrbit_u64_z(__VA_ARGS__)
+#define svrbit_u16_z(...) __builtin_sve_svrbit_u16_z(__VA_ARGS__)
+#define svrbit_s8_z(...) __builtin_sve_svrbit_s8_z(__VA_ARGS__)
+#define svrbit_s32_z(...) __builtin_sve_svrbit_s32_z(__VA_ARGS__)
+#define svrbit_s64_z(...) __builtin_sve_svrbit_s64_z(__VA_ARGS__)
+#define svrbit_s16_z(...) __builtin_sve_svrbit_s16_z(__VA_ARGS__)
+#define svrdffr(...) __builtin_sve_svrdffr(__VA_ARGS__)
+#define svrdffr_z(...) __builtin_sve_svrdffr_z(__VA_ARGS__)
+#define svrecpe_f64(...) __builtin_sve_svrecpe_f64(__VA_ARGS__)
+#define svrecpe_f32(...) __builtin_sve_svrecpe_f32(__VA_ARGS__)
+#define svrecpe_f16(...) __builtin_sve_svrecpe_f16(__VA_ARGS__)
+#define svrecps_f64(...) __builtin_sve_svrecps_f64(__VA_ARGS__)
+#define svrecps_f32(...) __builtin_sve_svrecps_f32(__VA_ARGS__)
+#define svrecps_f16(...) __builtin_sve_svrecps_f16(__VA_ARGS__)
+#define svrecpx_f64_m(...) __builtin_sve_svrecpx_f64_m(__VA_ARGS__)
+#define svrecpx_f32_m(...) __builtin_sve_svrecpx_f32_m(__VA_ARGS__)
+#define svrecpx_f16_m(...) __builtin_sve_svrecpx_f16_m(__VA_ARGS__)
+#define svrecpx_f64_x(...) __builtin_sve_svrecpx_f64_x(__VA_ARGS__)
+#define svrecpx_f32_x(...) __builtin_sve_svrecpx_f32_x(__VA_ARGS__)
+#define svrecpx_f16_x(...) __builtin_sve_svrecpx_f16_x(__VA_ARGS__)
+#define svrecpx_f64_z(...) __builtin_sve_svrecpx_f64_z(__VA_ARGS__)
+#define svrecpx_f32_z(...) __builtin_sve_svrecpx_f32_z(__VA_ARGS__)
+#define svrecpx_f16_z(...) __builtin_sve_svrecpx_f16_z(__VA_ARGS__)
+#define svrev_u8(...) __builtin_sve_svrev_u8(__VA_ARGS__)
+#define svrev_u32(...) __builtin_sve_svrev_u32(__VA_ARGS__)
+#define svrev_u64(...) __builtin_sve_svrev_u64(__VA_ARGS__)
+#define svrev_u16(...) __builtin_sve_svrev_u16(__VA_ARGS__)
+#define svrev_s8(...) __builtin_sve_svrev_s8(__VA_ARGS__)
+#define svrev_f64(...) __builtin_sve_svrev_f64(__VA_ARGS__)
+#define svrev_f32(...) __builtin_sve_svrev_f32(__VA_ARGS__)
+#define svrev_f16(...) __builtin_sve_svrev_f16(__VA_ARGS__)
+#define svrev_s32(...) __builtin_sve_svrev_s32(__VA_ARGS__)
+#define svrev_s64(...) __builtin_sve_svrev_s64(__VA_ARGS__)
+#define svrev_s16(...) __builtin_sve_svrev_s16(__VA_ARGS__)
+#define svrev_b8(...) __builtin_sve_svrev_b8(__VA_ARGS__)
+#define svrev_b32(...) __builtin_sve_svrev_b32(__VA_ARGS__)
+#define svrev_b64(...) __builtin_sve_svrev_b64(__VA_ARGS__)
+#define svrev_b16(...) __builtin_sve_svrev_b16(__VA_ARGS__)
+#define svrevb_u32_m(...) __builtin_sve_svrevb_u32_m(__VA_ARGS__)
+#define svrevb_u64_m(...) __builtin_sve_svrevb_u64_m(__VA_ARGS__)
+#define svrevb_u16_m(...) __builtin_sve_svrevb_u16_m(__VA_ARGS__)
+#define svrevb_s32_m(...) __builtin_sve_svrevb_s32_m(__VA_ARGS__)
+#define svrevb_s64_m(...) __builtin_sve_svrevb_s64_m(__VA_ARGS__)
+#define svrevb_s16_m(...) __builtin_sve_svrevb_s16_m(__VA_ARGS__)
+#define svrevb_u32_x(...) __builtin_sve_svrevb_u32_x(__VA_ARGS__)
+#define svrevb_u64_x(...) __builtin_sve_svrevb_u64_x(__VA_ARGS__)
+#define svrevb_u16_x(...) __builtin_sve_svrevb_u16_x(__VA_ARGS__)
+#define svrevb_s32_x(...) __builtin_sve_svrevb_s32_x(__VA_ARGS__)
+#define svrevb_s64_x(...) __builtin_sve_svrevb_s64_x(__VA_ARGS__)
+#define svrevb_s16_x(...) __builtin_sve_svrevb_s16_x(__VA_ARGS__)
+#define svrevb_u32_z(...) __builtin_sve_svrevb_u32_z(__VA_ARGS__)
+#define svrevb_u64_z(...) __builtin_sve_svrevb_u64_z(__VA_ARGS__)
+#define svrevb_u16_z(...) __builtin_sve_svrevb_u16_z(__VA_ARGS__)
+#define svrevb_s32_z(...) __builtin_sve_svrevb_s32_z(__VA_ARGS__)
+#define svrevb_s64_z(...) __builtin_sve_svrevb_s64_z(__VA_ARGS__)
+#define svrevb_s16_z(...) __builtin_sve_svrevb_s16_z(__VA_ARGS__)
+#define svrevh_u32_m(...) __builtin_sve_svrevh_u32_m(__VA_ARGS__)
+#define svrevh_u64_m(...) __builtin_sve_svrevh_u64_m(__VA_ARGS__)
+#define svrevh_s32_m(...) __builtin_sve_svrevh_s32_m(__VA_ARGS__)
+#define svrevh_s64_m(...) __builtin_sve_svrevh_s64_m(__VA_ARGS__)
+#define svrevh_u32_x(...) __builtin_sve_svrevh_u32_x(__VA_ARGS__)
+#define svrevh_u64_x(...) __builtin_sve_svrevh_u64_x(__VA_ARGS__)
+#define svrevh_s32_x(...) __builtin_sve_svrevh_s32_x(__VA_ARGS__)
+#define svrevh_s64_x(...) __builtin_sve_svrevh_s64_x(__VA_ARGS__)
+#define svrevh_u32_z(...) __builtin_sve_svrevh_u32_z(__VA_ARGS__)
+#define svrevh_u64_z(...) __builtin_sve_svrevh_u64_z(__VA_ARGS__)
+#define svrevh_s32_z(...) __builtin_sve_svrevh_s32_z(__VA_ARGS__)
+#define svrevh_s64_z(...) __builtin_sve_svrevh_s64_z(__VA_ARGS__)
+#define svrevw_u64_m(...) __builtin_sve_svrevw_u64_m(__VA_ARGS__)
+#define svrevw_s64_m(...) __builtin_sve_svrevw_s64_m(__VA_ARGS__)
+#define svrevw_u64_x(...) __builtin_sve_svrevw_u64_x(__VA_ARGS__)
+#define svrevw_s64_x(...) __builtin_sve_svrevw_s64_x(__VA_ARGS__)
+#define svrevw_u64_z(...) __builtin_sve_svrevw_u64_z(__VA_ARGS__)
+#define svrevw_s64_z(...) __builtin_sve_svrevw_s64_z(__VA_ARGS__)
+#define svrinta_f64_m(...) __builtin_sve_svrinta_f64_m(__VA_ARGS__)
+#define svrinta_f32_m(...) __builtin_sve_svrinta_f32_m(__VA_ARGS__)
+#define svrinta_f16_m(...) __builtin_sve_svrinta_f16_m(__VA_ARGS__)
+#define svrinta_f64_x(...) __builtin_sve_svrinta_f64_x(__VA_ARGS__)
+#define svrinta_f32_x(...) __builtin_sve_svrinta_f32_x(__VA_ARGS__)
+#define svrinta_f16_x(...) __builtin_sve_svrinta_f16_x(__VA_ARGS__)
+#define svrinta_f64_z(...) __builtin_sve_svrinta_f64_z(__VA_ARGS__)
+#define svrinta_f32_z(...) __builtin_sve_svrinta_f32_z(__VA_ARGS__)
+#define svrinta_f16_z(...) __builtin_sve_svrinta_f16_z(__VA_ARGS__)
+#define svrinti_f64_m(...) __builtin_sve_svrinti_f64_m(__VA_ARGS__)
+#define svrinti_f32_m(...) __builtin_sve_svrinti_f32_m(__VA_ARGS__)
+#define svrinti_f16_m(...) __builtin_sve_svrinti_f16_m(__VA_ARGS__)
+#define svrinti_f64_x(...) __builtin_sve_svrinti_f64_x(__VA_ARGS__)
+#define svrinti_f32_x(...) __builtin_sve_svrinti_f32_x(__VA_ARGS__)
+#define svrinti_f16_x(...) __builtin_sve_svrinti_f16_x(__VA_ARGS__)
+#define svrinti_f64_z(...) __builtin_sve_svrinti_f64_z(__VA_ARGS__)
+#define svrinti_f32_z(...) __builtin_sve_svrinti_f32_z(__VA_ARGS__)
+#define svrinti_f16_z(...) __builtin_sve_svrinti_f16_z(__VA_ARGS__)
+#define svrintm_f64_m(...) __builtin_sve_svrintm_f64_m(__VA_ARGS__)
+#define svrintm_f32_m(...) __builtin_sve_svrintm_f32_m(__VA_ARGS__)
+#define svrintm_f16_m(...) __builtin_sve_svrintm_f16_m(__VA_ARGS__)
+#define svrintm_f64_x(...) __builtin_sve_svrintm_f64_x(__VA_ARGS__)
+#define svrintm_f32_x(...) __builtin_sve_svrintm_f32_x(__VA_ARGS__)
+#define svrintm_f16_x(...) __builtin_sve_svrintm_f16_x(__VA_ARGS__)
+#define svrintm_f64_z(...) __builtin_sve_svrintm_f64_z(__VA_ARGS__)
+#define svrintm_f32_z(...) __builtin_sve_svrintm_f32_z(__VA_ARGS__)
+#define svrintm_f16_z(...) __builtin_sve_svrintm_f16_z(__VA_ARGS__)
+#define svrintn_f64_m(...) __builtin_sve_svrintn_f64_m(__VA_ARGS__)
+#define svrintn_f32_m(...) __builtin_sve_svrintn_f32_m(__VA_ARGS__)
+#define svrintn_f16_m(...) __builtin_sve_svrintn_f16_m(__VA_ARGS__)
+#define svrintn_f64_x(...) __builtin_sve_svrintn_f64_x(__VA_ARGS__)
+#define svrintn_f32_x(...) __builtin_sve_svrintn_f32_x(__VA_ARGS__)
+#define svrintn_f16_x(...) __builtin_sve_svrintn_f16_x(__VA_ARGS__)
+#define svrintn_f64_z(...) __builtin_sve_svrintn_f64_z(__VA_ARGS__)
+#define svrintn_f32_z(...) __builtin_sve_svrintn_f32_z(__VA_ARGS__)
+#define svrintn_f16_z(...) __builtin_sve_svrintn_f16_z(__VA_ARGS__)
+#define svrintp_f64_m(...) __builtin_sve_svrintp_f64_m(__VA_ARGS__)
+#define svrintp_f32_m(...) __builtin_sve_svrintp_f32_m(__VA_ARGS__)
+#define svrintp_f16_m(...) __builtin_sve_svrintp_f16_m(__VA_ARGS__)
+#define svrintp_f64_x(...) __builtin_sve_svrintp_f64_x(__VA_ARGS__)
+#define svrintp_f32_x(...) __builtin_sve_svrintp_f32_x(__VA_ARGS__)
+#define svrintp_f16_x(...) __builtin_sve_svrintp_f16_x(__VA_ARGS__)
+#define svrintp_f64_z(...) __builtin_sve_svrintp_f64_z(__VA_ARGS__)
+#define svrintp_f32_z(...) __builtin_sve_svrintp_f32_z(__VA_ARGS__)
+#define svrintp_f16_z(...) __builtin_sve_svrintp_f16_z(__VA_ARGS__)
+#define svrintx_f64_m(...) __builtin_sve_svrintx_f64_m(__VA_ARGS__)
+#define svrintx_f32_m(...) __builtin_sve_svrintx_f32_m(__VA_ARGS__)
+#define svrintx_f16_m(...) __builtin_sve_svrintx_f16_m(__VA_ARGS__)
+#define svrintx_f64_x(...) __builtin_sve_svrintx_f64_x(__VA_ARGS__)
+#define svrintx_f32_x(...) __builtin_sve_svrintx_f32_x(__VA_ARGS__)
+#define svrintx_f16_x(...) __builtin_sve_svrintx_f16_x(__VA_ARGS__)
+#define svrintx_f64_z(...) __builtin_sve_svrintx_f64_z(__VA_ARGS__)
+#define svrintx_f32_z(...) __builtin_sve_svrintx_f32_z(__VA_ARGS__)
+#define svrintx_f16_z(...) __builtin_sve_svrintx_f16_z(__VA_ARGS__)
+#define svrintz_f64_m(...) __builtin_sve_svrintz_f64_m(__VA_ARGS__)
+#define svrintz_f32_m(...) __builtin_sve_svrintz_f32_m(__VA_ARGS__)
+#define svrintz_f16_m(...) __builtin_sve_svrintz_f16_m(__VA_ARGS__)
+#define svrintz_f64_x(...) __builtin_sve_svrintz_f64_x(__VA_ARGS__)
+#define svrintz_f32_x(...) __builtin_sve_svrintz_f32_x(__VA_ARGS__)
+#define svrintz_f16_x(...) __builtin_sve_svrintz_f16_x(__VA_ARGS__)
+#define svrintz_f64_z(...) __builtin_sve_svrintz_f64_z(__VA_ARGS__)
+#define svrintz_f32_z(...) __builtin_sve_svrintz_f32_z(__VA_ARGS__)
+#define svrintz_f16_z(...) __builtin_sve_svrintz_f16_z(__VA_ARGS__)
+#define svrsqrte_f64(...) __builtin_sve_svrsqrte_f64(__VA_ARGS__)
+#define svrsqrte_f32(...) __builtin_sve_svrsqrte_f32(__VA_ARGS__)
+#define svrsqrte_f16(...) __builtin_sve_svrsqrte_f16(__VA_ARGS__)
+#define svrsqrts_f64(...) __builtin_sve_svrsqrts_f64(__VA_ARGS__)
+#define svrsqrts_f32(...) __builtin_sve_svrsqrts_f32(__VA_ARGS__)
+#define svrsqrts_f16(...) __builtin_sve_svrsqrts_f16(__VA_ARGS__)
+#define svscale_n_f64_m(...) __builtin_sve_svscale_n_f64_m(__VA_ARGS__)
+#define svscale_n_f32_m(...) __builtin_sve_svscale_n_f32_m(__VA_ARGS__)
+#define svscale_n_f16_m(...) __builtin_sve_svscale_n_f16_m(__VA_ARGS__)
+#define svscale_n_f64_x(...) __builtin_sve_svscale_n_f64_x(__VA_ARGS__)
+#define svscale_n_f32_x(...) __builtin_sve_svscale_n_f32_x(__VA_ARGS__)
+#define svscale_n_f16_x(...) __builtin_sve_svscale_n_f16_x(__VA_ARGS__)
+#define svscale_n_f64_z(...) __builtin_sve_svscale_n_f64_z(__VA_ARGS__)
+#define svscale_n_f32_z(...) __builtin_sve_svscale_n_f32_z(__VA_ARGS__)
+#define svscale_n_f16_z(...) __builtin_sve_svscale_n_f16_z(__VA_ARGS__)
+#define svscale_f64_m(...) __builtin_sve_svscale_f64_m(__VA_ARGS__)
+#define svscale_f32_m(...) __builtin_sve_svscale_f32_m(__VA_ARGS__)
+#define svscale_f16_m(...) __builtin_sve_svscale_f16_m(__VA_ARGS__)
+#define svscale_f64_x(...) __builtin_sve_svscale_f64_x(__VA_ARGS__)
+#define svscale_f32_x(...) __builtin_sve_svscale_f32_x(__VA_ARGS__)
+#define svscale_f16_x(...) __builtin_sve_svscale_f16_x(__VA_ARGS__)
+#define svscale_f64_z(...) __builtin_sve_svscale_f64_z(__VA_ARGS__)
+#define svscale_f32_z(...) __builtin_sve_svscale_f32_z(__VA_ARGS__)
+#define svscale_f16_z(...) __builtin_sve_svscale_f16_z(__VA_ARGS__)
+#define svsel_b(...) __builtin_sve_svsel_b(__VA_ARGS__)
+#define svsel_u8(...) __builtin_sve_svsel_u8(__VA_ARGS__)
+#define svsel_u32(...) __builtin_sve_svsel_u32(__VA_ARGS__)
+#define svsel_u64(...) __builtin_sve_svsel_u64(__VA_ARGS__)
+#define svsel_u16(...) __builtin_sve_svsel_u16(__VA_ARGS__)
+#define svsel_s8(...) __builtin_sve_svsel_s8(__VA_ARGS__)
+#define svsel_f64(...) __builtin_sve_svsel_f64(__VA_ARGS__)
+#define svsel_f32(...) __builtin_sve_svsel_f32(__VA_ARGS__)
+#define svsel_f16(...) __builtin_sve_svsel_f16(__VA_ARGS__)
+#define svsel_s32(...) __builtin_sve_svsel_s32(__VA_ARGS__)
+#define svsel_s64(...) __builtin_sve_svsel_s64(__VA_ARGS__)
+#define svsel_s16(...) __builtin_sve_svsel_s16(__VA_ARGS__)
+#define svset2_u8(...) __builtin_sve_svset2_u8(__VA_ARGS__)
+#define svset2_u32(...) __builtin_sve_svset2_u32(__VA_ARGS__)
+#define svset2_u64(...) __builtin_sve_svset2_u64(__VA_ARGS__)
+#define svset2_u16(...) __builtin_sve_svset2_u16(__VA_ARGS__)
+#define svset2_s8(...) __builtin_sve_svset2_s8(__VA_ARGS__)
+#define svset2_f64(...) __builtin_sve_svset2_f64(__VA_ARGS__)
+#define svset2_f32(...) __builtin_sve_svset2_f32(__VA_ARGS__)
+#define svset2_f16(...) __builtin_sve_svset2_f16(__VA_ARGS__)
+#define svset2_s32(...) __builtin_sve_svset2_s32(__VA_ARGS__)
+#define svset2_s64(...) __builtin_sve_svset2_s64(__VA_ARGS__)
+#define svset2_s16(...) __builtin_sve_svset2_s16(__VA_ARGS__)
+#define svset3_u8(...) __builtin_sve_svset3_u8(__VA_ARGS__)
+#define svset3_u32(...) __builtin_sve_svset3_u32(__VA_ARGS__)
+#define svset3_u64(...) __builtin_sve_svset3_u64(__VA_ARGS__)
+#define svset3_u16(...) __builtin_sve_svset3_u16(__VA_ARGS__)
+#define svset3_s8(...) __builtin_sve_svset3_s8(__VA_ARGS__)
+#define svset3_f64(...) __builtin_sve_svset3_f64(__VA_ARGS__)
+#define svset3_f32(...) __builtin_sve_svset3_f32(__VA_ARGS__)
+#define svset3_f16(...) __builtin_sve_svset3_f16(__VA_ARGS__)
+#define svset3_s32(...) __builtin_sve_svset3_s32(__VA_ARGS__)
+#define svset3_s64(...) __builtin_sve_svset3_s64(__VA_ARGS__)
+#define svset3_s16(...) __builtin_sve_svset3_s16(__VA_ARGS__)
+#define svset4_u8(...) __builtin_sve_svset4_u8(__VA_ARGS__)
+#define svset4_u32(...) __builtin_sve_svset4_u32(__VA_ARGS__)
+#define svset4_u64(...) __builtin_sve_svset4_u64(__VA_ARGS__)
+#define svset4_u16(...) __builtin_sve_svset4_u16(__VA_ARGS__)
+#define svset4_s8(...) __builtin_sve_svset4_s8(__VA_ARGS__)
+#define svset4_f64(...) __builtin_sve_svset4_f64(__VA_ARGS__)
+#define svset4_f32(...) __builtin_sve_svset4_f32(__VA_ARGS__)
+#define svset4_f16(...) __builtin_sve_svset4_f16(__VA_ARGS__)
+#define svset4_s32(...) __builtin_sve_svset4_s32(__VA_ARGS__)
+#define svset4_s64(...) __builtin_sve_svset4_s64(__VA_ARGS__)
+#define svset4_s16(...) __builtin_sve_svset4_s16(__VA_ARGS__)
+#define svsetffr(...) __builtin_sve_svsetffr(__VA_ARGS__)
+#define svsplice_u8(...) __builtin_sve_svsplice_u8(__VA_ARGS__)
+#define svsplice_u32(...) __builtin_sve_svsplice_u32(__VA_ARGS__)
+#define svsplice_u64(...) __builtin_sve_svsplice_u64(__VA_ARGS__)
+#define svsplice_u16(...) __builtin_sve_svsplice_u16(__VA_ARGS__)
+#define svsplice_s8(...) __builtin_sve_svsplice_s8(__VA_ARGS__)
+#define svsplice_f64(...) __builtin_sve_svsplice_f64(__VA_ARGS__)
+#define svsplice_f32(...) __builtin_sve_svsplice_f32(__VA_ARGS__)
+#define svsplice_f16(...) __builtin_sve_svsplice_f16(__VA_ARGS__)
+#define svsplice_s32(...) __builtin_sve_svsplice_s32(__VA_ARGS__)
+#define svsplice_s64(...) __builtin_sve_svsplice_s64(__VA_ARGS__)
+#define svsplice_s16(...) __builtin_sve_svsplice_s16(__VA_ARGS__)
+#define svsqrt_f64_m(...) __builtin_sve_svsqrt_f64_m(__VA_ARGS__)
+#define svsqrt_f32_m(...) __builtin_sve_svsqrt_f32_m(__VA_ARGS__)
+#define svsqrt_f16_m(...) __builtin_sve_svsqrt_f16_m(__VA_ARGS__)
+#define svsqrt_f64_x(...) __builtin_sve_svsqrt_f64_x(__VA_ARGS__)
+#define svsqrt_f32_x(...) __builtin_sve_svsqrt_f32_x(__VA_ARGS__)
+#define svsqrt_f16_x(...) __builtin_sve_svsqrt_f16_x(__VA_ARGS__)
+#define svsqrt_f64_z(...) __builtin_sve_svsqrt_f64_z(__VA_ARGS__)
+#define svsqrt_f32_z(...) __builtin_sve_svsqrt_f32_z(__VA_ARGS__)
+#define svsqrt_f16_z(...) __builtin_sve_svsqrt_f16_z(__VA_ARGS__)
+#define svst1_u8(...) __builtin_sve_svst1_u8(__VA_ARGS__)
+#define svst1_u32(...) __builtin_sve_svst1_u32(__VA_ARGS__)
+#define svst1_u64(...) __builtin_sve_svst1_u64(__VA_ARGS__)
+#define svst1_u16(...) __builtin_sve_svst1_u16(__VA_ARGS__)
+#define svst1_s8(...) __builtin_sve_svst1_s8(__VA_ARGS__)
+#define svst1_f64(...) __builtin_sve_svst1_f64(__VA_ARGS__)
+#define svst1_f32(...) __builtin_sve_svst1_f32(__VA_ARGS__)
+#define svst1_f16(...) __builtin_sve_svst1_f16(__VA_ARGS__)
+#define svst1_s32(...) __builtin_sve_svst1_s32(__VA_ARGS__)
+#define svst1_s64(...) __builtin_sve_svst1_s64(__VA_ARGS__)
+#define svst1_s16(...) __builtin_sve_svst1_s16(__VA_ARGS__)
+#define svst1_scatter_u32base_index_u32(...) __builtin_sve_svst1_scatter_u32base_index_u32(__VA_ARGS__)
+#define svst1_scatter_u64base_index_u64(...) __builtin_sve_svst1_scatter_u64base_index_u64(__VA_ARGS__)
+#define svst1_scatter_u64base_index_f64(...) __builtin_sve_svst1_scatter_u64base_index_f64(__VA_ARGS__)
+#define svst1_scatter_u32base_index_f32(...) __builtin_sve_svst1_scatter_u32base_index_f32(__VA_ARGS__)
+#define svst1_scatter_u32base_index_s32(...) __builtin_sve_svst1_scatter_u32base_index_s32(__VA_ARGS__)
+#define svst1_scatter_u64base_index_s64(...) __builtin_sve_svst1_scatter_u64base_index_s64(__VA_ARGS__)
+#define svst1_scatter_u32base_offset_u32(...) __builtin_sve_svst1_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svst1_scatter_u64base_offset_u64(...) __builtin_sve_svst1_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1_scatter_u64base_offset_f64(...) __builtin_sve_svst1_scatter_u64base_offset_f64(__VA_ARGS__)
+#define svst1_scatter_u32base_offset_f32(...) __builtin_sve_svst1_scatter_u32base_offset_f32(__VA_ARGS__)
+#define svst1_scatter_u32base_offset_s32(...) __builtin_sve_svst1_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svst1_scatter_u64base_offset_s64(...) __builtin_sve_svst1_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1_scatter_u32base_u32(...) __builtin_sve_svst1_scatter_u32base_u32(__VA_ARGS__)
+#define svst1_scatter_u64base_u64(...) __builtin_sve_svst1_scatter_u64base_u64(__VA_ARGS__)
+#define svst1_scatter_u64base_f64(...) __builtin_sve_svst1_scatter_u64base_f64(__VA_ARGS__)
+#define svst1_scatter_u32base_f32(...) __builtin_sve_svst1_scatter_u32base_f32(__VA_ARGS__)
+#define svst1_scatter_u32base_s32(...) __builtin_sve_svst1_scatter_u32base_s32(__VA_ARGS__)
+#define svst1_scatter_u64base_s64(...) __builtin_sve_svst1_scatter_u64base_s64(__VA_ARGS__)
+#define svst1_scatter_s32index_u32(...) __builtin_sve_svst1_scatter_s32index_u32(__VA_ARGS__)
+#define svst1_scatter_s32index_f32(...) __builtin_sve_svst1_scatter_s32index_f32(__VA_ARGS__)
+#define svst1_scatter_s32index_s32(...) __builtin_sve_svst1_scatter_s32index_s32(__VA_ARGS__)
+#define svst1_scatter_u32index_u32(...) __builtin_sve_svst1_scatter_u32index_u32(__VA_ARGS__)
+#define svst1_scatter_u32index_f32(...) __builtin_sve_svst1_scatter_u32index_f32(__VA_ARGS__)
+#define svst1_scatter_u32index_s32(...) __builtin_sve_svst1_scatter_u32index_s32(__VA_ARGS__)
+#define svst1_scatter_s64index_u64(...) __builtin_sve_svst1_scatter_s64index_u64(__VA_ARGS__)
+#define svst1_scatter_s64index_f64(...) __builtin_sve_svst1_scatter_s64index_f64(__VA_ARGS__)
+#define svst1_scatter_s64index_s64(...) __builtin_sve_svst1_scatter_s64index_s64(__VA_ARGS__)
+#define svst1_scatter_u64index_u64(...) __builtin_sve_svst1_scatter_u64index_u64(__VA_ARGS__)
+#define svst1_scatter_u64index_f64(...) __builtin_sve_svst1_scatter_u64index_f64(__VA_ARGS__)
+#define svst1_scatter_u64index_s64(...) __builtin_sve_svst1_scatter_u64index_s64(__VA_ARGS__)
+#define svst1_scatter_s32offset_u32(...) __builtin_sve_svst1_scatter_s32offset_u32(__VA_ARGS__)
+#define svst1_scatter_s32offset_f32(...) __builtin_sve_svst1_scatter_s32offset_f32(__VA_ARGS__)
+#define svst1_scatter_s32offset_s32(...) __builtin_sve_svst1_scatter_s32offset_s32(__VA_ARGS__)
+#define svst1_scatter_u32offset_u32(...) __builtin_sve_svst1_scatter_u32offset_u32(__VA_ARGS__)
+#define svst1_scatter_u32offset_f32(...) __builtin_sve_svst1_scatter_u32offset_f32(__VA_ARGS__)
+#define svst1_scatter_u32offset_s32(...) __builtin_sve_svst1_scatter_u32offset_s32(__VA_ARGS__)
+#define svst1_scatter_s64offset_u64(...) __builtin_sve_svst1_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1_scatter_s64offset_f64(...) __builtin_sve_svst1_scatter_s64offset_f64(__VA_ARGS__)
+#define svst1_scatter_s64offset_s64(...) __builtin_sve_svst1_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1_scatter_u64offset_u64(...) __builtin_sve_svst1_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1_scatter_u64offset_f64(...) __builtin_sve_svst1_scatter_u64offset_f64(__VA_ARGS__)
+#define svst1_scatter_u64offset_s64(...) __builtin_sve_svst1_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1_vnum_u8(...) __builtin_sve_svst1_vnum_u8(__VA_ARGS__)
+#define svst1_vnum_u32(...) __builtin_sve_svst1_vnum_u32(__VA_ARGS__)
+#define svst1_vnum_u64(...) __builtin_sve_svst1_vnum_u64(__VA_ARGS__)
+#define svst1_vnum_u16(...) __builtin_sve_svst1_vnum_u16(__VA_ARGS__)
+#define svst1_vnum_s8(...) __builtin_sve_svst1_vnum_s8(__VA_ARGS__)
+#define svst1_vnum_f64(...) __builtin_sve_svst1_vnum_f64(__VA_ARGS__)
+#define svst1_vnum_f32(...) __builtin_sve_svst1_vnum_f32(__VA_ARGS__)
+#define svst1_vnum_f16(...) __builtin_sve_svst1_vnum_f16(__VA_ARGS__)
+#define svst1_vnum_s32(...) __builtin_sve_svst1_vnum_s32(__VA_ARGS__)
+#define svst1_vnum_s64(...) __builtin_sve_svst1_vnum_s64(__VA_ARGS__)
+#define svst1_vnum_s16(...) __builtin_sve_svst1_vnum_s16(__VA_ARGS__)
+#define svst1b_s32(...) __builtin_sve_svst1b_s32(__VA_ARGS__)
+#define svst1b_s64(...) __builtin_sve_svst1b_s64(__VA_ARGS__)
+#define svst1b_s16(...) __builtin_sve_svst1b_s16(__VA_ARGS__)
+#define svst1b_u32(...) __builtin_sve_svst1b_u32(__VA_ARGS__)
+#define svst1b_u64(...) __builtin_sve_svst1b_u64(__VA_ARGS__)
+#define svst1b_u16(...) __builtin_sve_svst1b_u16(__VA_ARGS__)
+#define svst1b_scatter_u32base_offset_u32(...) __builtin_sve_svst1b_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svst1b_scatter_u64base_offset_u64(...) __builtin_sve_svst1b_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1b_scatter_u32base_offset_s32(...) __builtin_sve_svst1b_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svst1b_scatter_u64base_offset_s64(...) __builtin_sve_svst1b_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1b_scatter_u32base_u32(...) __builtin_sve_svst1b_scatter_u32base_u32(__VA_ARGS__)
+#define svst1b_scatter_u64base_u64(...) __builtin_sve_svst1b_scatter_u64base_u64(__VA_ARGS__)
+#define svst1b_scatter_u32base_s32(...) __builtin_sve_svst1b_scatter_u32base_s32(__VA_ARGS__)
+#define svst1b_scatter_u64base_s64(...) __builtin_sve_svst1b_scatter_u64base_s64(__VA_ARGS__)
+#define svst1b_scatter_s32offset_s32(...) __builtin_sve_svst1b_scatter_s32offset_s32(__VA_ARGS__)
+#define svst1b_scatter_s32offset_u32(...) __builtin_sve_svst1b_scatter_s32offset_u32(__VA_ARGS__)
+#define svst1b_scatter_u32offset_s32(...) __builtin_sve_svst1b_scatter_u32offset_s32(__VA_ARGS__)
+#define svst1b_scatter_u32offset_u32(...) __builtin_sve_svst1b_scatter_u32offset_u32(__VA_ARGS__)
+#define svst1b_scatter_s64offset_s64(...) __builtin_sve_svst1b_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1b_scatter_s64offset_u64(...) __builtin_sve_svst1b_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1b_scatter_u64offset_s64(...) __builtin_sve_svst1b_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1b_scatter_u64offset_u64(...) __builtin_sve_svst1b_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1b_vnum_s32(...) __builtin_sve_svst1b_vnum_s32(__VA_ARGS__)
+#define svst1b_vnum_s64(...) __builtin_sve_svst1b_vnum_s64(__VA_ARGS__)
+#define svst1b_vnum_s16(...) __builtin_sve_svst1b_vnum_s16(__VA_ARGS__)
+#define svst1b_vnum_u32(...) __builtin_sve_svst1b_vnum_u32(__VA_ARGS__)
+#define svst1b_vnum_u64(...) __builtin_sve_svst1b_vnum_u64(__VA_ARGS__)
+#define svst1b_vnum_u16(...) __builtin_sve_svst1b_vnum_u16(__VA_ARGS__)
+#define svst1h_s32(...) __builtin_sve_svst1h_s32(__VA_ARGS__)
+#define svst1h_s64(...) __builtin_sve_svst1h_s64(__VA_ARGS__)
+#define svst1h_u32(...) __builtin_sve_svst1h_u32(__VA_ARGS__)
+#define svst1h_u64(...) __builtin_sve_svst1h_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_index_u32(...) __builtin_sve_svst1h_scatter_u32base_index_u32(__VA_ARGS__)
+#define svst1h_scatter_u64base_index_u64(...) __builtin_sve_svst1h_scatter_u64base_index_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_index_s32(...) __builtin_sve_svst1h_scatter_u32base_index_s32(__VA_ARGS__)
+#define svst1h_scatter_u64base_index_s64(...) __builtin_sve_svst1h_scatter_u64base_index_s64(__VA_ARGS__)
+#define svst1h_scatter_u32base_offset_u32(...) __builtin_sve_svst1h_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svst1h_scatter_u64base_offset_u64(...) __builtin_sve_svst1h_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_offset_s32(...) __builtin_sve_svst1h_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svst1h_scatter_u64base_offset_s64(...) __builtin_sve_svst1h_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1h_scatter_u32base_u32(...) __builtin_sve_svst1h_scatter_u32base_u32(__VA_ARGS__)
+#define svst1h_scatter_u64base_u64(...) __builtin_sve_svst1h_scatter_u64base_u64(__VA_ARGS__)
+#define svst1h_scatter_u32base_s32(...) __builtin_sve_svst1h_scatter_u32base_s32(__VA_ARGS__)
+#define svst1h_scatter_u64base_s64(...) __builtin_sve_svst1h_scatter_u64base_s64(__VA_ARGS__)
+#define svst1h_scatter_s32index_s32(...) __builtin_sve_svst1h_scatter_s32index_s32(__VA_ARGS__)
+#define svst1h_scatter_s32index_u32(...) __builtin_sve_svst1h_scatter_s32index_u32(__VA_ARGS__)
+#define svst1h_scatter_u32index_s32(...) __builtin_sve_svst1h_scatter_u32index_s32(__VA_ARGS__)
+#define svst1h_scatter_u32index_u32(...) __builtin_sve_svst1h_scatter_u32index_u32(__VA_ARGS__)
+#define svst1h_scatter_s64index_s64(...) __builtin_sve_svst1h_scatter_s64index_s64(__VA_ARGS__)
+#define svst1h_scatter_s64index_u64(...) __builtin_sve_svst1h_scatter_s64index_u64(__VA_ARGS__)
+#define svst1h_scatter_u64index_s64(...) __builtin_sve_svst1h_scatter_u64index_s64(__VA_ARGS__)
+#define svst1h_scatter_u64index_u64(...) __builtin_sve_svst1h_scatter_u64index_u64(__VA_ARGS__)
+#define svst1h_scatter_s32offset_s32(...) __builtin_sve_svst1h_scatter_s32offset_s32(__VA_ARGS__)
+#define svst1h_scatter_s32offset_u32(...) __builtin_sve_svst1h_scatter_s32offset_u32(__VA_ARGS__)
+#define svst1h_scatter_u32offset_s32(...) __builtin_sve_svst1h_scatter_u32offset_s32(__VA_ARGS__)
+#define svst1h_scatter_u32offset_u32(...) __builtin_sve_svst1h_scatter_u32offset_u32(__VA_ARGS__)
+#define svst1h_scatter_s64offset_s64(...) __builtin_sve_svst1h_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1h_scatter_s64offset_u64(...) __builtin_sve_svst1h_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1h_scatter_u64offset_s64(...) __builtin_sve_svst1h_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1h_scatter_u64offset_u64(...) __builtin_sve_svst1h_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1h_vnum_s32(...) __builtin_sve_svst1h_vnum_s32(__VA_ARGS__)
+#define svst1h_vnum_s64(...) __builtin_sve_svst1h_vnum_s64(__VA_ARGS__)
+#define svst1h_vnum_u32(...) __builtin_sve_svst1h_vnum_u32(__VA_ARGS__)
+#define svst1h_vnum_u64(...) __builtin_sve_svst1h_vnum_u64(__VA_ARGS__)
+#define svst1w_s64(...) __builtin_sve_svst1w_s64(__VA_ARGS__)
+#define svst1w_u64(...) __builtin_sve_svst1w_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_index_u64(...) __builtin_sve_svst1w_scatter_u64base_index_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_index_s64(...) __builtin_sve_svst1w_scatter_u64base_index_s64(__VA_ARGS__)
+#define svst1w_scatter_u64base_offset_u64(...) __builtin_sve_svst1w_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_offset_s64(...) __builtin_sve_svst1w_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svst1w_scatter_u64base_u64(...) __builtin_sve_svst1w_scatter_u64base_u64(__VA_ARGS__)
+#define svst1w_scatter_u64base_s64(...) __builtin_sve_svst1w_scatter_u64base_s64(__VA_ARGS__)
+#define svst1w_scatter_s64index_s64(...) __builtin_sve_svst1w_scatter_s64index_s64(__VA_ARGS__)
+#define svst1w_scatter_s64index_u64(...) __builtin_sve_svst1w_scatter_s64index_u64(__VA_ARGS__)
+#define svst1w_scatter_u64index_s64(...) __builtin_sve_svst1w_scatter_u64index_s64(__VA_ARGS__)
+#define svst1w_scatter_u64index_u64(...) __builtin_sve_svst1w_scatter_u64index_u64(__VA_ARGS__)
+#define svst1w_scatter_s64offset_s64(...) __builtin_sve_svst1w_scatter_s64offset_s64(__VA_ARGS__)
+#define svst1w_scatter_s64offset_u64(...) __builtin_sve_svst1w_scatter_s64offset_u64(__VA_ARGS__)
+#define svst1w_scatter_u64offset_s64(...) __builtin_sve_svst1w_scatter_u64offset_s64(__VA_ARGS__)
+#define svst1w_scatter_u64offset_u64(...) __builtin_sve_svst1w_scatter_u64offset_u64(__VA_ARGS__)
+#define svst1w_vnum_s64(...) __builtin_sve_svst1w_vnum_s64(__VA_ARGS__)
+#define svst1w_vnum_u64(...) __builtin_sve_svst1w_vnum_u64(__VA_ARGS__)
+#define svst2_u8(...) __builtin_sve_svst2_u8(__VA_ARGS__)
+#define svst2_u32(...) __builtin_sve_svst2_u32(__VA_ARGS__)
+#define svst2_u64(...) __builtin_sve_svst2_u64(__VA_ARGS__)
+#define svst2_u16(...) __builtin_sve_svst2_u16(__VA_ARGS__)
+#define svst2_s8(...) __builtin_sve_svst2_s8(__VA_ARGS__)
+#define svst2_f64(...) __builtin_sve_svst2_f64(__VA_ARGS__)
+#define svst2_f32(...) __builtin_sve_svst2_f32(__VA_ARGS__)
+#define svst2_f16(...) __builtin_sve_svst2_f16(__VA_ARGS__)
+#define svst2_s32(...) __builtin_sve_svst2_s32(__VA_ARGS__)
+#define svst2_s64(...) __builtin_sve_svst2_s64(__VA_ARGS__)
+#define svst2_s16(...) __builtin_sve_svst2_s16(__VA_ARGS__)
+#define svst2_vnum_u8(...) __builtin_sve_svst2_vnum_u8(__VA_ARGS__)
+#define svst2_vnum_u32(...) __builtin_sve_svst2_vnum_u32(__VA_ARGS__)
+#define svst2_vnum_u64(...) __builtin_sve_svst2_vnum_u64(__VA_ARGS__)
+#define svst2_vnum_u16(...) __builtin_sve_svst2_vnum_u16(__VA_ARGS__)
+#define svst2_vnum_s8(...) __builtin_sve_svst2_vnum_s8(__VA_ARGS__)
+#define svst2_vnum_f64(...) __builtin_sve_svst2_vnum_f64(__VA_ARGS__)
+#define svst2_vnum_f32(...) __builtin_sve_svst2_vnum_f32(__VA_ARGS__)
+#define svst2_vnum_f16(...) __builtin_sve_svst2_vnum_f16(__VA_ARGS__)
+#define svst2_vnum_s32(...) __builtin_sve_svst2_vnum_s32(__VA_ARGS__)
+#define svst2_vnum_s64(...) __builtin_sve_svst2_vnum_s64(__VA_ARGS__)
+#define svst2_vnum_s16(...) __builtin_sve_svst2_vnum_s16(__VA_ARGS__)
+#define svst3_u8(...) __builtin_sve_svst3_u8(__VA_ARGS__)
+#define svst3_u32(...) __builtin_sve_svst3_u32(__VA_ARGS__)
+#define svst3_u64(...) __builtin_sve_svst3_u64(__VA_ARGS__)
+#define svst3_u16(...) __builtin_sve_svst3_u16(__VA_ARGS__)
+#define svst3_s8(...) __builtin_sve_svst3_s8(__VA_ARGS__)
+#define svst3_f64(...) __builtin_sve_svst3_f64(__VA_ARGS__)
+#define svst3_f32(...) __builtin_sve_svst3_f32(__VA_ARGS__)
+#define svst3_f16(...) __builtin_sve_svst3_f16(__VA_ARGS__)
+#define svst3_s32(...) __builtin_sve_svst3_s32(__VA_ARGS__)
+#define svst3_s64(...) __builtin_sve_svst3_s64(__VA_ARGS__)
+#define svst3_s16(...) __builtin_sve_svst3_s16(__VA_ARGS__)
+#define svst3_vnum_u8(...) __builtin_sve_svst3_vnum_u8(__VA_ARGS__)
+#define svst3_vnum_u32(...) __builtin_sve_svst3_vnum_u32(__VA_ARGS__)
+#define svst3_vnum_u64(...) __builtin_sve_svst3_vnum_u64(__VA_ARGS__)
+#define svst3_vnum_u16(...) __builtin_sve_svst3_vnum_u16(__VA_ARGS__)
+#define svst3_vnum_s8(...) __builtin_sve_svst3_vnum_s8(__VA_ARGS__)
+#define svst3_vnum_f64(...) __builtin_sve_svst3_vnum_f64(__VA_ARGS__)
+#define svst3_vnum_f32(...) __builtin_sve_svst3_vnum_f32(__VA_ARGS__)
+#define svst3_vnum_f16(...) __builtin_sve_svst3_vnum_f16(__VA_ARGS__)
+#define svst3_vnum_s32(...) __builtin_sve_svst3_vnum_s32(__VA_ARGS__)
+#define svst3_vnum_s64(...) __builtin_sve_svst3_vnum_s64(__VA_ARGS__)
+#define svst3_vnum_s16(...) __builtin_sve_svst3_vnum_s16(__VA_ARGS__)
+#define svst4_u8(...) __builtin_sve_svst4_u8(__VA_ARGS__)
+#define svst4_u32(...) __builtin_sve_svst4_u32(__VA_ARGS__)
+#define svst4_u64(...) __builtin_sve_svst4_u64(__VA_ARGS__)
+#define svst4_u16(...) __builtin_sve_svst4_u16(__VA_ARGS__)
+#define svst4_s8(...) __builtin_sve_svst4_s8(__VA_ARGS__)
+#define svst4_f64(...) __builtin_sve_svst4_f64(__VA_ARGS__)
+#define svst4_f32(...) __builtin_sve_svst4_f32(__VA_ARGS__)
+#define svst4_f16(...) __builtin_sve_svst4_f16(__VA_ARGS__)
+#define svst4_s32(...) __builtin_sve_svst4_s32(__VA_ARGS__)
+#define svst4_s64(...) __builtin_sve_svst4_s64(__VA_ARGS__)
+#define svst4_s16(...) __builtin_sve_svst4_s16(__VA_ARGS__)
+#define svst4_vnum_u8(...) __builtin_sve_svst4_vnum_u8(__VA_ARGS__)
+#define svst4_vnum_u32(...) __builtin_sve_svst4_vnum_u32(__VA_ARGS__)
+#define svst4_vnum_u64(...) __builtin_sve_svst4_vnum_u64(__VA_ARGS__)
+#define svst4_vnum_u16(...) __builtin_sve_svst4_vnum_u16(__VA_ARGS__)
+#define svst4_vnum_s8(...) __builtin_sve_svst4_vnum_s8(__VA_ARGS__)
+#define svst4_vnum_f64(...) __builtin_sve_svst4_vnum_f64(__VA_ARGS__)
+#define svst4_vnum_f32(...) __builtin_sve_svst4_vnum_f32(__VA_ARGS__)
+#define svst4_vnum_f16(...) __builtin_sve_svst4_vnum_f16(__VA_ARGS__)
+#define svst4_vnum_s32(...) __builtin_sve_svst4_vnum_s32(__VA_ARGS__)
+#define svst4_vnum_s64(...) __builtin_sve_svst4_vnum_s64(__VA_ARGS__)
+#define svst4_vnum_s16(...) __builtin_sve_svst4_vnum_s16(__VA_ARGS__)
+#define svstnt1_u8(...) __builtin_sve_svstnt1_u8(__VA_ARGS__)
+#define svstnt1_u32(...) __builtin_sve_svstnt1_u32(__VA_ARGS__)
+#define svstnt1_u64(...) __builtin_sve_svstnt1_u64(__VA_ARGS__)
+#define svstnt1_u16(...) __builtin_sve_svstnt1_u16(__VA_ARGS__)
+#define svstnt1_s8(...) __builtin_sve_svstnt1_s8(__VA_ARGS__)
+#define svstnt1_f64(...) __builtin_sve_svstnt1_f64(__VA_ARGS__)
+#define svstnt1_f32(...) __builtin_sve_svstnt1_f32(__VA_ARGS__)
+#define svstnt1_f16(...) __builtin_sve_svstnt1_f16(__VA_ARGS__)
+#define svstnt1_s32(...) __builtin_sve_svstnt1_s32(__VA_ARGS__)
+#define svstnt1_s64(...) __builtin_sve_svstnt1_s64(__VA_ARGS__)
+#define svstnt1_s16(...) __builtin_sve_svstnt1_s16(__VA_ARGS__)
+#define svstnt1_vnum_u8(...) __builtin_sve_svstnt1_vnum_u8(__VA_ARGS__)
+#define svstnt1_vnum_u32(...) __builtin_sve_svstnt1_vnum_u32(__VA_ARGS__)
+#define svstnt1_vnum_u64(...) __builtin_sve_svstnt1_vnum_u64(__VA_ARGS__)
+#define svstnt1_vnum_u16(...) __builtin_sve_svstnt1_vnum_u16(__VA_ARGS__)
+#define svstnt1_vnum_s8(...) __builtin_sve_svstnt1_vnum_s8(__VA_ARGS__)
+#define svstnt1_vnum_f64(...) __builtin_sve_svstnt1_vnum_f64(__VA_ARGS__)
+#define svstnt1_vnum_f32(...) __builtin_sve_svstnt1_vnum_f32(__VA_ARGS__)
+#define svstnt1_vnum_f16(...) __builtin_sve_svstnt1_vnum_f16(__VA_ARGS__)
+#define svstnt1_vnum_s32(...) __builtin_sve_svstnt1_vnum_s32(__VA_ARGS__)
+#define svstnt1_vnum_s64(...) __builtin_sve_svstnt1_vnum_s64(__VA_ARGS__)
+#define svstnt1_vnum_s16(...) __builtin_sve_svstnt1_vnum_s16(__VA_ARGS__)
+#define svsub_n_f64_m(...) __builtin_sve_svsub_n_f64_m(__VA_ARGS__)
+#define svsub_n_f32_m(...) __builtin_sve_svsub_n_f32_m(__VA_ARGS__)
+#define svsub_n_f16_m(...) __builtin_sve_svsub_n_f16_m(__VA_ARGS__)
+#define svsub_n_f64_x(...) __builtin_sve_svsub_n_f64_x(__VA_ARGS__)
+#define svsub_n_f32_x(...) __builtin_sve_svsub_n_f32_x(__VA_ARGS__)
+#define svsub_n_f16_x(...) __builtin_sve_svsub_n_f16_x(__VA_ARGS__)
+#define svsub_n_f64_z(...) __builtin_sve_svsub_n_f64_z(__VA_ARGS__)
+#define svsub_n_f32_z(...) __builtin_sve_svsub_n_f32_z(__VA_ARGS__)
+#define svsub_n_f16_z(...) __builtin_sve_svsub_n_f16_z(__VA_ARGS__)
+#define svsub_n_u8_m(...) __builtin_sve_svsub_n_u8_m(__VA_ARGS__)
+#define svsub_n_u32_m(...) __builtin_sve_svsub_n_u32_m(__VA_ARGS__)
+#define svsub_n_u64_m(...) __builtin_sve_svsub_n_u64_m(__VA_ARGS__)
+#define svsub_n_u16_m(...) __builtin_sve_svsub_n_u16_m(__VA_ARGS__)
+#define svsub_n_s8_m(...) __builtin_sve_svsub_n_s8_m(__VA_ARGS__)
+#define svsub_n_s32_m(...) __builtin_sve_svsub_n_s32_m(__VA_ARGS__)
+#define svsub_n_s64_m(...) __builtin_sve_svsub_n_s64_m(__VA_ARGS__)
+#define svsub_n_s16_m(...) __builtin_sve_svsub_n_s16_m(__VA_ARGS__)
+#define svsub_n_u8_x(...) __builtin_sve_svsub_n_u8_x(__VA_ARGS__)
+#define svsub_n_u32_x(...) __builtin_sve_svsub_n_u32_x(__VA_ARGS__)
+#define svsub_n_u64_x(...) __builtin_sve_svsub_n_u64_x(__VA_ARGS__)
+#define svsub_n_u16_x(...) __builtin_sve_svsub_n_u16_x(__VA_ARGS__)
+#define svsub_n_s8_x(...) __builtin_sve_svsub_n_s8_x(__VA_ARGS__)
+#define svsub_n_s32_x(...) __builtin_sve_svsub_n_s32_x(__VA_ARGS__)
+#define svsub_n_s64_x(...) __builtin_sve_svsub_n_s64_x(__VA_ARGS__)
+#define svsub_n_s16_x(...) __builtin_sve_svsub_n_s16_x(__VA_ARGS__)
+#define svsub_n_u8_z(...) __builtin_sve_svsub_n_u8_z(__VA_ARGS__)
+#define svsub_n_u32_z(...) __builtin_sve_svsub_n_u32_z(__VA_ARGS__)
+#define svsub_n_u64_z(...) __builtin_sve_svsub_n_u64_z(__VA_ARGS__)
+#define svsub_n_u16_z(...) __builtin_sve_svsub_n_u16_z(__VA_ARGS__)
+#define svsub_n_s8_z(...) __builtin_sve_svsub_n_s8_z(__VA_ARGS__)
+#define svsub_n_s32_z(...) __builtin_sve_svsub_n_s32_z(__VA_ARGS__)
+#define svsub_n_s64_z(...) __builtin_sve_svsub_n_s64_z(__VA_ARGS__)
+#define svsub_n_s16_z(...) __builtin_sve_svsub_n_s16_z(__VA_ARGS__)
+#define svsub_f64_m(...) __builtin_sve_svsub_f64_m(__VA_ARGS__)
+#define svsub_f32_m(...) __builtin_sve_svsub_f32_m(__VA_ARGS__)
+#define svsub_f16_m(...) __builtin_sve_svsub_f16_m(__VA_ARGS__)
+#define svsub_f64_x(...) __builtin_sve_svsub_f64_x(__VA_ARGS__)
+#define svsub_f32_x(...) __builtin_sve_svsub_f32_x(__VA_ARGS__)
+#define svsub_f16_x(...) __builtin_sve_svsub_f16_x(__VA_ARGS__)
+#define svsub_f64_z(...) __builtin_sve_svsub_f64_z(__VA_ARGS__)
+#define svsub_f32_z(...) __builtin_sve_svsub_f32_z(__VA_ARGS__)
+#define svsub_f16_z(...) __builtin_sve_svsub_f16_z(__VA_ARGS__)
+#define svsub_u8_m(...) __builtin_sve_svsub_u8_m(__VA_ARGS__)
+#define svsub_u32_m(...) __builtin_sve_svsub_u32_m(__VA_ARGS__)
+#define svsub_u64_m(...) __builtin_sve_svsub_u64_m(__VA_ARGS__)
+#define svsub_u16_m(...) __builtin_sve_svsub_u16_m(__VA_ARGS__)
+#define svsub_s8_m(...) __builtin_sve_svsub_s8_m(__VA_ARGS__)
+#define svsub_s32_m(...) __builtin_sve_svsub_s32_m(__VA_ARGS__)
+#define svsub_s64_m(...) __builtin_sve_svsub_s64_m(__VA_ARGS__)
+#define svsub_s16_m(...) __builtin_sve_svsub_s16_m(__VA_ARGS__)
+#define svsub_u8_x(...) __builtin_sve_svsub_u8_x(__VA_ARGS__)
+#define svsub_u32_x(...) __builtin_sve_svsub_u32_x(__VA_ARGS__)
+#define svsub_u64_x(...) __builtin_sve_svsub_u64_x(__VA_ARGS__)
+#define svsub_u16_x(...) __builtin_sve_svsub_u16_x(__VA_ARGS__)
+#define svsub_s8_x(...) __builtin_sve_svsub_s8_x(__VA_ARGS__)
+#define svsub_s32_x(...) __builtin_sve_svsub_s32_x(__VA_ARGS__)
+#define svsub_s64_x(...) __builtin_sve_svsub_s64_x(__VA_ARGS__)
+#define svsub_s16_x(...) __builtin_sve_svsub_s16_x(__VA_ARGS__)
+#define svsub_u8_z(...) __builtin_sve_svsub_u8_z(__VA_ARGS__)
+#define svsub_u32_z(...) __builtin_sve_svsub_u32_z(__VA_ARGS__)
+#define svsub_u64_z(...) __builtin_sve_svsub_u64_z(__VA_ARGS__)
+#define svsub_u16_z(...) __builtin_sve_svsub_u16_z(__VA_ARGS__)
+#define svsub_s8_z(...) __builtin_sve_svsub_s8_z(__VA_ARGS__)
+#define svsub_s32_z(...) __builtin_sve_svsub_s32_z(__VA_ARGS__)
+#define svsub_s64_z(...) __builtin_sve_svsub_s64_z(__VA_ARGS__)
+#define svsub_s16_z(...) __builtin_sve_svsub_s16_z(__VA_ARGS__)
+#define svsubr_n_f64_m(...) __builtin_sve_svsubr_n_f64_m(__VA_ARGS__)
+#define svsubr_n_f32_m(...) __builtin_sve_svsubr_n_f32_m(__VA_ARGS__)
+#define svsubr_n_f16_m(...) __builtin_sve_svsubr_n_f16_m(__VA_ARGS__)
+#define svsubr_n_f64_x(...) __builtin_sve_svsubr_n_f64_x(__VA_ARGS__)
+#define svsubr_n_f32_x(...) __builtin_sve_svsubr_n_f32_x(__VA_ARGS__)
+#define svsubr_n_f16_x(...) __builtin_sve_svsubr_n_f16_x(__VA_ARGS__)
+#define svsubr_n_f64_z(...) __builtin_sve_svsubr_n_f64_z(__VA_ARGS__)
+#define svsubr_n_f32_z(...) __builtin_sve_svsubr_n_f32_z(__VA_ARGS__)
+#define svsubr_n_f16_z(...) __builtin_sve_svsubr_n_f16_z(__VA_ARGS__)
+#define svsubr_n_u8_m(...) __builtin_sve_svsubr_n_u8_m(__VA_ARGS__)
+#define svsubr_n_u32_m(...) __builtin_sve_svsubr_n_u32_m(__VA_ARGS__)
+#define svsubr_n_u64_m(...) __builtin_sve_svsubr_n_u64_m(__VA_ARGS__)
+#define svsubr_n_u16_m(...) __builtin_sve_svsubr_n_u16_m(__VA_ARGS__)
+#define svsubr_n_s8_m(...) __builtin_sve_svsubr_n_s8_m(__VA_ARGS__)
+#define svsubr_n_s32_m(...) __builtin_sve_svsubr_n_s32_m(__VA_ARGS__)
+#define svsubr_n_s64_m(...) __builtin_sve_svsubr_n_s64_m(__VA_ARGS__)
+#define svsubr_n_s16_m(...) __builtin_sve_svsubr_n_s16_m(__VA_ARGS__)
+#define svsubr_n_u8_x(...) __builtin_sve_svsubr_n_u8_x(__VA_ARGS__)
+#define svsubr_n_u32_x(...) __builtin_sve_svsubr_n_u32_x(__VA_ARGS__)
+#define svsubr_n_u64_x(...) __builtin_sve_svsubr_n_u64_x(__VA_ARGS__)
+#define svsubr_n_u16_x(...) __builtin_sve_svsubr_n_u16_x(__VA_ARGS__)
+#define svsubr_n_s8_x(...) __builtin_sve_svsubr_n_s8_x(__VA_ARGS__)
+#define svsubr_n_s32_x(...) __builtin_sve_svsubr_n_s32_x(__VA_ARGS__)
+#define svsubr_n_s64_x(...) __builtin_sve_svsubr_n_s64_x(__VA_ARGS__)
+#define svsubr_n_s16_x(...) __builtin_sve_svsubr_n_s16_x(__VA_ARGS__)
+#define svsubr_n_u8_z(...) __builtin_sve_svsubr_n_u8_z(__VA_ARGS__)
+#define svsubr_n_u32_z(...) __builtin_sve_svsubr_n_u32_z(__VA_ARGS__)
+#define svsubr_n_u64_z(...) __builtin_sve_svsubr_n_u64_z(__VA_ARGS__)
+#define svsubr_n_u16_z(...) __builtin_sve_svsubr_n_u16_z(__VA_ARGS__)
+#define svsubr_n_s8_z(...) __builtin_sve_svsubr_n_s8_z(__VA_ARGS__)
+#define svsubr_n_s32_z(...) __builtin_sve_svsubr_n_s32_z(__VA_ARGS__)
+#define svsubr_n_s64_z(...) __builtin_sve_svsubr_n_s64_z(__VA_ARGS__)
+#define svsubr_n_s16_z(...) __builtin_sve_svsubr_n_s16_z(__VA_ARGS__)
+#define svsubr_f64_m(...) __builtin_sve_svsubr_f64_m(__VA_ARGS__)
+#define svsubr_f32_m(...) __builtin_sve_svsubr_f32_m(__VA_ARGS__)
+#define svsubr_f16_m(...) __builtin_sve_svsubr_f16_m(__VA_ARGS__)
+#define svsubr_f64_x(...) __builtin_sve_svsubr_f64_x(__VA_ARGS__)
+#define svsubr_f32_x(...) __builtin_sve_svsubr_f32_x(__VA_ARGS__)
+#define svsubr_f16_x(...) __builtin_sve_svsubr_f16_x(__VA_ARGS__)
+#define svsubr_f64_z(...) __builtin_sve_svsubr_f64_z(__VA_ARGS__)
+#define svsubr_f32_z(...) __builtin_sve_svsubr_f32_z(__VA_ARGS__)
+#define svsubr_f16_z(...) __builtin_sve_svsubr_f16_z(__VA_ARGS__)
+#define svsubr_u8_m(...) __builtin_sve_svsubr_u8_m(__VA_ARGS__)
+#define svsubr_u32_m(...) __builtin_sve_svsubr_u32_m(__VA_ARGS__)
+#define svsubr_u64_m(...) __builtin_sve_svsubr_u64_m(__VA_ARGS__)
+#define svsubr_u16_m(...) __builtin_sve_svsubr_u16_m(__VA_ARGS__)
+#define svsubr_s8_m(...) __builtin_sve_svsubr_s8_m(__VA_ARGS__)
+#define svsubr_s32_m(...) __builtin_sve_svsubr_s32_m(__VA_ARGS__)
+#define svsubr_s64_m(...) __builtin_sve_svsubr_s64_m(__VA_ARGS__)
+#define svsubr_s16_m(...) __builtin_sve_svsubr_s16_m(__VA_ARGS__)
+#define svsubr_u8_x(...) __builtin_sve_svsubr_u8_x(__VA_ARGS__)
+#define svsubr_u32_x(...) __builtin_sve_svsubr_u32_x(__VA_ARGS__)
+#define svsubr_u64_x(...) __builtin_sve_svsubr_u64_x(__VA_ARGS__)
+#define svsubr_u16_x(...) __builtin_sve_svsubr_u16_x(__VA_ARGS__)
+#define svsubr_s8_x(...) __builtin_sve_svsubr_s8_x(__VA_ARGS__)
+#define svsubr_s32_x(...) __builtin_sve_svsubr_s32_x(__VA_ARGS__)
+#define svsubr_s64_x(...) __builtin_sve_svsubr_s64_x(__VA_ARGS__)
+#define svsubr_s16_x(...) __builtin_sve_svsubr_s16_x(__VA_ARGS__)
+#define svsubr_u8_z(...) __builtin_sve_svsubr_u8_z(__VA_ARGS__)
+#define svsubr_u32_z(...) __builtin_sve_svsubr_u32_z(__VA_ARGS__)
+#define svsubr_u64_z(...) __builtin_sve_svsubr_u64_z(__VA_ARGS__)
+#define svsubr_u16_z(...) __builtin_sve_svsubr_u16_z(__VA_ARGS__)
+#define svsubr_s8_z(...) __builtin_sve_svsubr_s8_z(__VA_ARGS__)
+#define svsubr_s32_z(...) __builtin_sve_svsubr_s32_z(__VA_ARGS__)
+#define svsubr_s64_z(...) __builtin_sve_svsubr_s64_z(__VA_ARGS__)
+#define svsubr_s16_z(...) __builtin_sve_svsubr_s16_z(__VA_ARGS__)
+#define svtbl_u8(...) __builtin_sve_svtbl_u8(__VA_ARGS__)
+#define svtbl_u32(...) __builtin_sve_svtbl_u32(__VA_ARGS__)
+#define svtbl_u64(...) __builtin_sve_svtbl_u64(__VA_ARGS__)
+#define svtbl_u16(...) __builtin_sve_svtbl_u16(__VA_ARGS__)
+#define svtbl_s8(...) __builtin_sve_svtbl_s8(__VA_ARGS__)
+#define svtbl_f64(...) __builtin_sve_svtbl_f64(__VA_ARGS__)
+#define svtbl_f32(...) __builtin_sve_svtbl_f32(__VA_ARGS__)
+#define svtbl_f16(...) __builtin_sve_svtbl_f16(__VA_ARGS__)
+#define svtbl_s32(...) __builtin_sve_svtbl_s32(__VA_ARGS__)
+#define svtbl_s64(...) __builtin_sve_svtbl_s64(__VA_ARGS__)
+#define svtbl_s16(...) __builtin_sve_svtbl_s16(__VA_ARGS__)
+#define svtmad_f64(...) __builtin_sve_svtmad_f64(__VA_ARGS__)
+#define svtmad_f32(...) __builtin_sve_svtmad_f32(__VA_ARGS__)
+#define svtmad_f16(...) __builtin_sve_svtmad_f16(__VA_ARGS__)
+#define svtrn1_u8(...) __builtin_sve_svtrn1_u8(__VA_ARGS__)
+#define svtrn1_u32(...) __builtin_sve_svtrn1_u32(__VA_ARGS__)
+#define svtrn1_u64(...) __builtin_sve_svtrn1_u64(__VA_ARGS__)
+#define svtrn1_u16(...) __builtin_sve_svtrn1_u16(__VA_ARGS__)
+#define svtrn1_s8(...) __builtin_sve_svtrn1_s8(__VA_ARGS__)
+#define svtrn1_f64(...) __builtin_sve_svtrn1_f64(__VA_ARGS__)
+#define svtrn1_f32(...) __builtin_sve_svtrn1_f32(__VA_ARGS__)
+#define svtrn1_f16(...) __builtin_sve_svtrn1_f16(__VA_ARGS__)
+#define svtrn1_s32(...) __builtin_sve_svtrn1_s32(__VA_ARGS__)
+#define svtrn1_s64(...) __builtin_sve_svtrn1_s64(__VA_ARGS__)
+#define svtrn1_s16(...) __builtin_sve_svtrn1_s16(__VA_ARGS__)
+#define svtrn1_b8(...) __builtin_sve_svtrn1_b8(__VA_ARGS__)
+#define svtrn1_b32(...) __builtin_sve_svtrn1_b32(__VA_ARGS__)
+#define svtrn1_b64(...) __builtin_sve_svtrn1_b64(__VA_ARGS__)
+#define svtrn1_b16(...) __builtin_sve_svtrn1_b16(__VA_ARGS__)
+#define svtrn2_u8(...) __builtin_sve_svtrn2_u8(__VA_ARGS__)
+#define svtrn2_u32(...) __builtin_sve_svtrn2_u32(__VA_ARGS__)
+#define svtrn2_u64(...) __builtin_sve_svtrn2_u64(__VA_ARGS__)
+#define svtrn2_u16(...) __builtin_sve_svtrn2_u16(__VA_ARGS__)
+#define svtrn2_s8(...) __builtin_sve_svtrn2_s8(__VA_ARGS__)
+#define svtrn2_f64(...) __builtin_sve_svtrn2_f64(__VA_ARGS__)
+#define svtrn2_f32(...) __builtin_sve_svtrn2_f32(__VA_ARGS__)
+#define svtrn2_f16(...) __builtin_sve_svtrn2_f16(__VA_ARGS__)
+#define svtrn2_s32(...) __builtin_sve_svtrn2_s32(__VA_ARGS__)
+#define svtrn2_s64(...) __builtin_sve_svtrn2_s64(__VA_ARGS__)
+#define svtrn2_s16(...) __builtin_sve_svtrn2_s16(__VA_ARGS__)
+#define svtrn2_b8(...) __builtin_sve_svtrn2_b8(__VA_ARGS__)
+#define svtrn2_b32(...) __builtin_sve_svtrn2_b32(__VA_ARGS__)
+#define svtrn2_b64(...) __builtin_sve_svtrn2_b64(__VA_ARGS__)
+#define svtrn2_b16(...) __builtin_sve_svtrn2_b16(__VA_ARGS__)
+#define svtsmul_f64(...) __builtin_sve_svtsmul_f64(__VA_ARGS__)
+#define svtsmul_f32(...) __builtin_sve_svtsmul_f32(__VA_ARGS__)
+#define svtsmul_f16(...) __builtin_sve_svtsmul_f16(__VA_ARGS__)
+#define svtssel_f64(...) __builtin_sve_svtssel_f64(__VA_ARGS__)
+#define svtssel_f32(...) __builtin_sve_svtssel_f32(__VA_ARGS__)
+#define svtssel_f16(...) __builtin_sve_svtssel_f16(__VA_ARGS__)
+#define svundef2_u8(...) __builtin_sve_svundef2_u8(__VA_ARGS__)
+#define svundef2_u32(...) __builtin_sve_svundef2_u32(__VA_ARGS__)
+#define svundef2_u64(...) __builtin_sve_svundef2_u64(__VA_ARGS__)
+#define svundef2_u16(...) __builtin_sve_svundef2_u16(__VA_ARGS__)
+#define svundef2_s8(...) __builtin_sve_svundef2_s8(__VA_ARGS__)
+#define svundef2_f64(...) __builtin_sve_svundef2_f64(__VA_ARGS__)
+#define svundef2_f32(...) __builtin_sve_svundef2_f32(__VA_ARGS__)
+#define svundef2_f16(...) __builtin_sve_svundef2_f16(__VA_ARGS__)
+#define svundef2_s32(...) __builtin_sve_svundef2_s32(__VA_ARGS__)
+#define svundef2_s64(...) __builtin_sve_svundef2_s64(__VA_ARGS__)
+#define svundef2_s16(...) __builtin_sve_svundef2_s16(__VA_ARGS__)
+#define svundef3_u8(...) __builtin_sve_svundef3_u8(__VA_ARGS__)
+#define svundef3_u32(...) __builtin_sve_svundef3_u32(__VA_ARGS__)
+#define svundef3_u64(...) __builtin_sve_svundef3_u64(__VA_ARGS__)
+#define svundef3_u16(...) __builtin_sve_svundef3_u16(__VA_ARGS__)
+#define svundef3_s8(...) __builtin_sve_svundef3_s8(__VA_ARGS__)
+#define svundef3_f64(...) __builtin_sve_svundef3_f64(__VA_ARGS__)
+#define svundef3_f32(...) __builtin_sve_svundef3_f32(__VA_ARGS__)
+#define svundef3_f16(...) __builtin_sve_svundef3_f16(__VA_ARGS__)
+#define svundef3_s32(...) __builtin_sve_svundef3_s32(__VA_ARGS__)
+#define svundef3_s64(...) __builtin_sve_svundef3_s64(__VA_ARGS__)
+#define svundef3_s16(...) __builtin_sve_svundef3_s16(__VA_ARGS__)
+#define svundef4_u8(...) __builtin_sve_svundef4_u8(__VA_ARGS__)
+#define svundef4_u32(...) __builtin_sve_svundef4_u32(__VA_ARGS__)
+#define svundef4_u64(...) __builtin_sve_svundef4_u64(__VA_ARGS__)
+#define svundef4_u16(...) __builtin_sve_svundef4_u16(__VA_ARGS__)
+#define svundef4_s8(...) __builtin_sve_svundef4_s8(__VA_ARGS__)
+#define svundef4_f64(...) __builtin_sve_svundef4_f64(__VA_ARGS__)
+#define svundef4_f32(...) __builtin_sve_svundef4_f32(__VA_ARGS__)
+#define svundef4_f16(...) __builtin_sve_svundef4_f16(__VA_ARGS__)
+#define svundef4_s32(...) __builtin_sve_svundef4_s32(__VA_ARGS__)
+#define svundef4_s64(...) __builtin_sve_svundef4_s64(__VA_ARGS__)
+#define svundef4_s16(...) __builtin_sve_svundef4_s16(__VA_ARGS__)
+#define svundef_u8(...) __builtin_sve_svundef_u8(__VA_ARGS__)
+#define svundef_u32(...) __builtin_sve_svundef_u32(__VA_ARGS__)
+#define svundef_u64(...) __builtin_sve_svundef_u64(__VA_ARGS__)
+#define svundef_u16(...) __builtin_sve_svundef_u16(__VA_ARGS__)
+#define svundef_s8(...) __builtin_sve_svundef_s8(__VA_ARGS__)
+#define svundef_f64(...) __builtin_sve_svundef_f64(__VA_ARGS__)
+#define svundef_f32(...) __builtin_sve_svundef_f32(__VA_ARGS__)
+#define svundef_f16(...) __builtin_sve_svundef_f16(__VA_ARGS__)
+#define svundef_s32(...) __builtin_sve_svundef_s32(__VA_ARGS__)
+#define svundef_s64(...) __builtin_sve_svundef_s64(__VA_ARGS__)
+#define svundef_s16(...) __builtin_sve_svundef_s16(__VA_ARGS__)
+#define svunpkhi_b(...) __builtin_sve_svunpkhi_b(__VA_ARGS__)
+#define svunpkhi_s32(...) __builtin_sve_svunpkhi_s32(__VA_ARGS__)
+#define svunpkhi_s64(...) __builtin_sve_svunpkhi_s64(__VA_ARGS__)
+#define svunpkhi_s16(...) __builtin_sve_svunpkhi_s16(__VA_ARGS__)
+#define svunpkhi_u32(...) __builtin_sve_svunpkhi_u32(__VA_ARGS__)
+#define svunpkhi_u64(...) __builtin_sve_svunpkhi_u64(__VA_ARGS__)
+#define svunpkhi_u16(...) __builtin_sve_svunpkhi_u16(__VA_ARGS__)
+#define svunpklo_b(...) __builtin_sve_svunpklo_b(__VA_ARGS__)
+#define svunpklo_s32(...) __builtin_sve_svunpklo_s32(__VA_ARGS__)
+#define svunpklo_s64(...) __builtin_sve_svunpklo_s64(__VA_ARGS__)
+#define svunpklo_s16(...) __builtin_sve_svunpklo_s16(__VA_ARGS__)
+#define svunpklo_u32(...) __builtin_sve_svunpklo_u32(__VA_ARGS__)
+#define svunpklo_u64(...) __builtin_sve_svunpklo_u64(__VA_ARGS__)
+#define svunpklo_u16(...) __builtin_sve_svunpklo_u16(__VA_ARGS__)
+#define svuzp1_u8(...) __builtin_sve_svuzp1_u8(__VA_ARGS__)
+#define svuzp1_u32(...) __builtin_sve_svuzp1_u32(__VA_ARGS__)
+#define svuzp1_u64(...) __builtin_sve_svuzp1_u64(__VA_ARGS__)
+#define svuzp1_u16(...) __builtin_sve_svuzp1_u16(__VA_ARGS__)
+#define svuzp1_s8(...) __builtin_sve_svuzp1_s8(__VA_ARGS__)
+#define svuzp1_f64(...) __builtin_sve_svuzp1_f64(__VA_ARGS__)
+#define svuzp1_f32(...) __builtin_sve_svuzp1_f32(__VA_ARGS__)
+#define svuzp1_f16(...) __builtin_sve_svuzp1_f16(__VA_ARGS__)
+#define svuzp1_s32(...) __builtin_sve_svuzp1_s32(__VA_ARGS__)
+#define svuzp1_s64(...) __builtin_sve_svuzp1_s64(__VA_ARGS__)
+#define svuzp1_s16(...) __builtin_sve_svuzp1_s16(__VA_ARGS__)
+#define svuzp1_b8(...) __builtin_sve_svuzp1_b8(__VA_ARGS__)
+#define svuzp1_b32(...) __builtin_sve_svuzp1_b32(__VA_ARGS__)
+#define svuzp1_b64(...) __builtin_sve_svuzp1_b64(__VA_ARGS__)
+#define svuzp1_b16(...) __builtin_sve_svuzp1_b16(__VA_ARGS__)
+#define svuzp2_u8(...) __builtin_sve_svuzp2_u8(__VA_ARGS__)
+#define svuzp2_u32(...) __builtin_sve_svuzp2_u32(__VA_ARGS__)
+#define svuzp2_u64(...) __builtin_sve_svuzp2_u64(__VA_ARGS__)
+#define svuzp2_u16(...) __builtin_sve_svuzp2_u16(__VA_ARGS__)
+#define svuzp2_s8(...) __builtin_sve_svuzp2_s8(__VA_ARGS__)
+#define svuzp2_f64(...) __builtin_sve_svuzp2_f64(__VA_ARGS__)
+#define svuzp2_f32(...) __builtin_sve_svuzp2_f32(__VA_ARGS__)
+#define svuzp2_f16(...) __builtin_sve_svuzp2_f16(__VA_ARGS__)
+#define svuzp2_s32(...) __builtin_sve_svuzp2_s32(__VA_ARGS__)
+#define svuzp2_s64(...) __builtin_sve_svuzp2_s64(__VA_ARGS__)
+#define svuzp2_s16(...) __builtin_sve_svuzp2_s16(__VA_ARGS__)
+#define svuzp2_b8(...) __builtin_sve_svuzp2_b8(__VA_ARGS__)
+#define svuzp2_b32(...) __builtin_sve_svuzp2_b32(__VA_ARGS__)
+#define svuzp2_b64(...) __builtin_sve_svuzp2_b64(__VA_ARGS__)
+#define svuzp2_b16(...) __builtin_sve_svuzp2_b16(__VA_ARGS__)
+#define svwhilele_b8_s32(...) __builtin_sve_svwhilele_b8_s32(__VA_ARGS__)
+#define svwhilele_b32_s32(...) __builtin_sve_svwhilele_b32_s32(__VA_ARGS__)
+#define svwhilele_b64_s32(...) __builtin_sve_svwhilele_b64_s32(__VA_ARGS__)
+#define svwhilele_b16_s32(...) __builtin_sve_svwhilele_b16_s32(__VA_ARGS__)
+#define svwhilele_b8_s64(...) __builtin_sve_svwhilele_b8_s64(__VA_ARGS__)
+#define svwhilele_b32_s64(...) __builtin_sve_svwhilele_b32_s64(__VA_ARGS__)
+#define svwhilele_b64_s64(...) __builtin_sve_svwhilele_b64_s64(__VA_ARGS__)
+#define svwhilele_b16_s64(...) __builtin_sve_svwhilele_b16_s64(__VA_ARGS__)
+#define svwhilele_b8_u32(...) __builtin_sve_svwhilele_b8_u32(__VA_ARGS__)
+#define svwhilele_b32_u32(...) __builtin_sve_svwhilele_b32_u32(__VA_ARGS__)
+#define svwhilele_b64_u32(...) __builtin_sve_svwhilele_b64_u32(__VA_ARGS__)
+#define svwhilele_b16_u32(...) __builtin_sve_svwhilele_b16_u32(__VA_ARGS__)
+#define svwhilele_b8_u64(...) __builtin_sve_svwhilele_b8_u64(__VA_ARGS__)
+#define svwhilele_b32_u64(...) __builtin_sve_svwhilele_b32_u64(__VA_ARGS__)
+#define svwhilele_b64_u64(...) __builtin_sve_svwhilele_b64_u64(__VA_ARGS__)
+#define svwhilele_b16_u64(...) __builtin_sve_svwhilele_b16_u64(__VA_ARGS__)
+#define svwhilelt_b8_u32(...) __builtin_sve_svwhilelt_b8_u32(__VA_ARGS__)
+#define svwhilelt_b32_u32(...) __builtin_sve_svwhilelt_b32_u32(__VA_ARGS__)
+#define svwhilelt_b64_u32(...) __builtin_sve_svwhilelt_b64_u32(__VA_ARGS__)
+#define svwhilelt_b16_u32(...) __builtin_sve_svwhilelt_b16_u32(__VA_ARGS__)
+#define svwhilelt_b8_u64(...) __builtin_sve_svwhilelt_b8_u64(__VA_ARGS__)
+#define svwhilelt_b32_u64(...) __builtin_sve_svwhilelt_b32_u64(__VA_ARGS__)
+#define svwhilelt_b64_u64(...) __builtin_sve_svwhilelt_b64_u64(__VA_ARGS__)
+#define svwhilelt_b16_u64(...) __builtin_sve_svwhilelt_b16_u64(__VA_ARGS__)
+#define svwhilelt_b8_s32(...) __builtin_sve_svwhilelt_b8_s32(__VA_ARGS__)
+#define svwhilelt_b32_s32(...) __builtin_sve_svwhilelt_b32_s32(__VA_ARGS__)
+#define svwhilelt_b64_s32(...) __builtin_sve_svwhilelt_b64_s32(__VA_ARGS__)
+#define svwhilelt_b16_s32(...) __builtin_sve_svwhilelt_b16_s32(__VA_ARGS__)
+#define svwhilelt_b8_s64(...) __builtin_sve_svwhilelt_b8_s64(__VA_ARGS__)
+#define svwhilelt_b32_s64(...) __builtin_sve_svwhilelt_b32_s64(__VA_ARGS__)
+#define svwhilelt_b64_s64(...) __builtin_sve_svwhilelt_b64_s64(__VA_ARGS__)
+#define svwhilelt_b16_s64(...) __builtin_sve_svwhilelt_b16_s64(__VA_ARGS__)
+#define svwrffr(...) __builtin_sve_svwrffr(__VA_ARGS__)
+#define svzip1_u8(...) __builtin_sve_svzip1_u8(__VA_ARGS__)
+#define svzip1_u32(...) __builtin_sve_svzip1_u32(__VA_ARGS__)
+#define svzip1_u64(...) __builtin_sve_svzip1_u64(__VA_ARGS__)
+#define svzip1_u16(...) __builtin_sve_svzip1_u16(__VA_ARGS__)
+#define svzip1_s8(...) __builtin_sve_svzip1_s8(__VA_ARGS__)
+#define svzip1_f64(...) __builtin_sve_svzip1_f64(__VA_ARGS__)
+#define svzip1_f32(...) __builtin_sve_svzip1_f32(__VA_ARGS__)
+#define svzip1_f16(...) __builtin_sve_svzip1_f16(__VA_ARGS__)
+#define svzip1_s32(...) __builtin_sve_svzip1_s32(__VA_ARGS__)
+#define svzip1_s64(...) __builtin_sve_svzip1_s64(__VA_ARGS__)
+#define svzip1_s16(...) __builtin_sve_svzip1_s16(__VA_ARGS__)
+#define svzip1_b8(...) __builtin_sve_svzip1_b8(__VA_ARGS__)
+#define svzip1_b32(...) __builtin_sve_svzip1_b32(__VA_ARGS__)
+#define svzip1_b64(...) __builtin_sve_svzip1_b64(__VA_ARGS__)
+#define svzip1_b16(...) __builtin_sve_svzip1_b16(__VA_ARGS__)
+#define svzip2_u8(...) __builtin_sve_svzip2_u8(__VA_ARGS__)
+#define svzip2_u32(...) __builtin_sve_svzip2_u32(__VA_ARGS__)
+#define svzip2_u64(...) __builtin_sve_svzip2_u64(__VA_ARGS__)
+#define svzip2_u16(...) __builtin_sve_svzip2_u16(__VA_ARGS__)
+#define svzip2_s8(...) __builtin_sve_svzip2_s8(__VA_ARGS__)
+#define svzip2_f64(...) __builtin_sve_svzip2_f64(__VA_ARGS__)
+#define svzip2_f32(...) __builtin_sve_svzip2_f32(__VA_ARGS__)
+#define svzip2_f16(...) __builtin_sve_svzip2_f16(__VA_ARGS__)
+#define svzip2_s32(...) __builtin_sve_svzip2_s32(__VA_ARGS__)
+#define svzip2_s64(...) __builtin_sve_svzip2_s64(__VA_ARGS__)
+#define svzip2_s16(...) __builtin_sve_svzip2_s16(__VA_ARGS__)
+#define svzip2_b8(...) __builtin_sve_svzip2_b8(__VA_ARGS__)
+#define svzip2_b32(...) __builtin_sve_svzip2_b32(__VA_ARGS__)
+#define svzip2_b64(...) __builtin_sve_svzip2_b64(__VA_ARGS__)
+#define svzip2_b16(...) __builtin_sve_svzip2_b16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m)))
+svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m)))
+svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m)))
+svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x)))
+svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x)))
+svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x)))
+svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z)))
+svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z)))
+svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z)))
+svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m)))
+svint8_t svabd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m)))
+svint32_t svabd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m)))
+svint64_t svabd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m)))
+svint16_t svabd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x)))
+svint8_t svabd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x)))
+svint32_t svabd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x)))
+svint64_t svabd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x)))
+svint16_t svabd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z)))
+svint8_t svabd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z)))
+svint32_t svabd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z)))
+svint64_t svabd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z)))
+svint16_t svabd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m)))
+svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m)))
+svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m)))
+svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m)))
+svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x)))
+svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x)))
+svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x)))
+svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x)))
+svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z)))
+svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z)))
+svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z)))
+svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z)))
+svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m)))
+svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m)))
+svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m)))
+svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x)))
+svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x)))
+svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x)))
+svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z)))
+svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z)))
+svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z)))
+svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m)))
+svint8_t svabd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m)))
+svint32_t svabd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m)))
+svint64_t svabd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m)))
+svint16_t svabd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x)))
+svint8_t svabd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x)))
+svint32_t svabd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x)))
+svint64_t svabd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x)))
+svint16_t svabd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z)))
+svint8_t svabd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z)))
+svint32_t svabd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z)))
+svint64_t svabd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z)))
+svint16_t svabd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m)))
+svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m)))
+svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m)))
+svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m)))
+svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x)))
+svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x)))
+svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x)))
+svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x)))
+svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z)))
+svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z)))
+svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z)))
+svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z)))
+svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t);
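+/* Usage sketch (illustrative comment, not emitted by the intrinsics
+ * generator): the __aio overloads above map the single name svabd onto the
+ * type-specific builtins, so the element type is inferred from the
+ * arguments; the _m/_x/_z suffixes select merging (inactive lanes keep
+ * op1), don't-care, and zeroing predication. Assuming <arm_sve.h> and an
+ * SVE-enabled target:
+ *
+ *   svuint32_t abs_diff(svbool_t pg, svuint32_t a, svuint32_t b) {
+ *     return svabd_x(pg, a, b);      // |a - b| per active lane
+ *   }
+ *   // A scalar second operand resolves to the _n_ builtin instead:
+ *   //   svabd_z(pg, a, 16u)  ->  __builtin_sve_svabd_n_u32_z
+ */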
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m)))
+svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m)))
+svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m)))
+svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x)))
+svfloat64_t svabs_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x)))
+svfloat32_t svabs_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x)))
+svfloat16_t svabs_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z)))
+svfloat64_t svabs_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z)))
+svfloat32_t svabs_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z)))
+svfloat16_t svabs_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m)))
+svint8_t svabs_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m)))
+svint32_t svabs_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m)))
+svint64_t svabs_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m)))
+svint16_t svabs_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x)))
+svint8_t svabs_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x)))
+svint32_t svabs_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x)))
+svint64_t svabs_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x)))
+svint16_t svabs_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z)))
+svint8_t svabs_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z)))
+svint32_t svabs_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z)))
+svint64_t svabs_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z)))
+svint16_t svabs_z(svbool_t, svint16_t);
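+/* Note on the svabs_m signatures above (illustrative comment): unlike the
+ * binary operations, the merging form of a unary operation takes an
+ * explicit "inactive" vector as its first argument, which supplies the
+ * result for lanes where the predicate is false:
+ *
+ *   svfloat32_t y = svabs_m(fallback, pg, x);  // inactive lanes <- fallback
+ *   svfloat32_t z = svabs_z(pg, x);            // inactive lanes <- 0
+ */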
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64)))
+svbool_t svacge(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32)))
+svbool_t svacge(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16)))
+svbool_t svacge(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64)))
+svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32)))
+svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16)))
+svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64)))
+svbool_t svacgt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32)))
+svbool_t svacgt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16)))
+svbool_t svacgt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64)))
+svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32)))
+svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16)))
+svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64)))
+svbool_t svacle(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32)))
+svbool_t svacle(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16)))
+svbool_t svacle(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64)))
+svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32)))
+svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16)))
+svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64)))
+svbool_t svaclt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32)))
+svbool_t svaclt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16)))
+svbool_t svaclt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64)))
+svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32)))
+svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16)))
+svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t);
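+/* Usage sketch (illustrative comment): svacge/svacgt/svacle/svaclt are the
+ * absolute-value comparisons; they compare |op1| against |op2| on active
+ * lanes and return a predicate:
+ *
+ *   svfloat64_t v = ...;
+ *   svbool_t big = svacgt(pg, v, 1.0);   // |v[i]| > 1.0 where pg is true
+ */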
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m)))
+svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m)))
+svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m)))
+svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x)))
+svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x)))
+svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x)))
+svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z)))
+svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z)))
+svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z)))
+svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m)))
+svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m)))
+svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m)))
+svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m)))
+svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m)))
+svint8_t svadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m)))
+svint32_t svadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m)))
+svint64_t svadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m)))
+svint16_t svadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x)))
+svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x)))
+svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x)))
+svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x)))
+svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x)))
+svint8_t svadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x)))
+svint32_t svadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x)))
+svint64_t svadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x)))
+svint16_t svadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z)))
+svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z)))
+svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z)))
+svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z)))
+svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z)))
+svint8_t svadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z)))
+svint32_t svadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z)))
+svint64_t svadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z)))
+svint16_t svadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m)))
+svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m)))
+svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m)))
+svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x)))
+svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x)))
+svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x)))
+svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z)))
+svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z)))
+svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z)))
+svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m)))
+svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m)))
+svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m)))
+svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m)))
+svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m)))
+svint8_t svadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m)))
+svint32_t svadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m)))
+svint64_t svadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m)))
+svint16_t svadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x)))
+svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x)))
+svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x)))
+svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x)))
+svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x)))
+svint8_t svadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x)))
+svint32_t svadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x)))
+svint64_t svadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x)))
+svint16_t svadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z)))
+svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z)))
+svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z)))
+svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z)))
+svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z)))
+svint8_t svadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z)))
+svint32_t svadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z)))
+svint64_t svadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z)))
+svint16_t svadd_z(svbool_t, svint16_t, svint16_t);
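+/* Usage sketch (illustrative comment): each svadd form exists both with a
+ * vector second operand and with a scalar (_n_ builtin) that is broadcast
+ * to every lane, so the two calls below compute the same thing:
+ *
+ *   svint32_t s1 = svadd_m(pg, acc, v);   // vector + vector
+ *   svint32_t s2 = svadd_m(pg, acc, 1);   // vector + broadcast scalar
+ */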
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64)))
+float64_t svadda(svbool_t, float64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32)))
+float32_t svadda(svbool_t, float32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16)))
+float16_t svadda(svbool_t, float16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8)))
+int64_t svaddv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32)))
+int64_t svaddv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64)))
+int64_t svaddv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16)))
+int64_t svaddv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8)))
+uint64_t svaddv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32)))
+uint64_t svaddv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64)))
+uint64_t svaddv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16)))
+uint64_t svaddv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64)))
+float64_t svaddv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32)))
+float32_t svaddv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16)))
+float16_t svaddv(svbool_t, svfloat16_t);
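+/* Usage sketch (illustrative comment): svaddv is a horizontal reduction;
+ * note in the declarations above that the 8/16/32-bit integer forms widen
+ * the result to int64_t/uint64_t. svadda is the strictly-ordered
+ * floating-point accumulation, folding an initial scalar in lane order:
+ *
+ *   int64_t   total = svaddv(pg, bytes);        // bytes: svint8_t
+ *   float64_t acc   = svadda(pg, 0.0, samples); // samples: svfloat64_t
+ */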
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset)))
+svuint32_t svadrb_offset(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset)))
+svuint64_t svadrb_offset(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset)))
+svuint32_t svadrb_offset(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset)))
+svuint64_t svadrb_offset(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index)))
+svuint32_t svadrd_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index)))
+svuint64_t svadrd_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index)))
+svuint32_t svadrd_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index)))
+svuint64_t svadrd_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index)))
+svuint32_t svadrh_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index)))
+svuint64_t svadrh_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index)))
+svuint32_t svadrh_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index)))
+svuint64_t svadrh_index(svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index)))
+svuint32_t svadrw_index(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index)))
+svuint64_t svadrw_index(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index)))
+svuint32_t svadrw_index(svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index)))
+svuint64_t svadrw_index(svuint64_t, svint64_t);
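+/* Usage sketch (illustrative comment): the svadr{b,h,w,d} forms compute
+ * vectors of addresses for gathers/scatters: _offset adds byte offsets,
+ * while the _index forms scale the index by the element size (2, 4, or 8):
+ *
+ *   // addr[i] = base[i] + 4 * idx[i], e.g. for 32-bit element gathers
+ *   svuint64_t addr = svadrw_index(base, idx);
+ */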
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z)))
+svbool_t svand_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m)))
+svuint8_t svand_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m)))
+svuint32_t svand_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m)))
+svuint64_t svand_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m)))
+svuint16_t svand_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m)))
+svint8_t svand_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m)))
+svint32_t svand_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m)))
+svint64_t svand_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m)))
+svint16_t svand_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x)))
+svuint8_t svand_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x)))
+svuint32_t svand_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x)))
+svuint64_t svand_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x)))
+svuint16_t svand_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x)))
+svint8_t svand_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x)))
+svint32_t svand_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x)))
+svint64_t svand_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x)))
+svint16_t svand_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z)))
+svuint8_t svand_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z)))
+svuint32_t svand_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z)))
+svuint64_t svand_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z)))
+svuint16_t svand_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z)))
+svint8_t svand_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z)))
+svint32_t svand_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z)))
+svint64_t svand_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z)))
+svint16_t svand_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m)))
+svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m)))
+svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m)))
+svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m)))
+svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m)))
+svint8_t svand_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m)))
+svint32_t svand_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m)))
+svint64_t svand_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m)))
+svint16_t svand_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x)))
+svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x)))
+svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x)))
+svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x)))
+svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x)))
+svint8_t svand_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x)))
+svint32_t svand_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x)))
+svint64_t svand_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x)))
+svint16_t svand_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z)))
+svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z)))
+svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z)))
+svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z)))
+svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z)))
+svint8_t svand_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z)))
+svint32_t svand_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z)))
+svint64_t svand_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z)))
+svint16_t svand_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8)))
+uint8_t svandv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32)))
+uint32_t svandv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64)))
+uint64_t svandv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16)))
+uint16_t svandv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8)))
+int8_t svandv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32)))
+int32_t svandv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64)))
+int64_t svandv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16)))
+int16_t svandv(svbool_t, svint16_t);
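+/* Usage sketch (illustrative comment): svand also has the predicate form
+ * svand_z(pg, pa, pb) declared above, which is the usual way to combine
+ * conditions, and svandv reduces a vector with bitwise AND:
+ *
+ *   svbool_t both = svand_z(pg, p1, p2);   // p1 & p2, zeroed outside pg
+ *   uint32_t mask = svandv(pg, flags);     // flags: svuint32_t
+ */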
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m)))
+svint8_t svasr_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m)))
+svint32_t svasr_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m)))
+svint64_t svasr_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m)))
+svint16_t svasr_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x)))
+svint8_t svasr_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x)))
+svint32_t svasr_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x)))
+svint64_t svasr_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x)))
+svint16_t svasr_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z)))
+svint8_t svasr_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z)))
+svint32_t svasr_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z)))
+svint64_t svasr_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z)))
+svint16_t svasr_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m)))
+svint8_t svasr_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m)))
+svint32_t svasr_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m)))
+svint64_t svasr_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m)))
+svint16_t svasr_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x)))
+svint8_t svasr_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x)))
+svint32_t svasr_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x)))
+svint64_t svasr_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x)))
+svint16_t svasr_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z)))
+svint8_t svasr_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z)))
+svint32_t svasr_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z)))
+svint64_t svasr_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z)))
+svint16_t svasr_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m)))
+svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m)))
+svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m)))
+svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x)))
+svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x)))
+svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x)))
+svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z)))
+svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z)))
+svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z)))
+svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m)))
+svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m)))
+svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m)))
+svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x)))
+svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x)))
+svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x)))
+svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z)))
+svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z)))
+svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z)))
+svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m)))
+svint8_t svasrd_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m)))
+svint32_t svasrd_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m)))
+svint64_t svasrd_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m)))
+svint16_t svasrd_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x)))
+svint8_t svasrd_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x)))
+svint32_t svasrd_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x)))
+svint64_t svasrd_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x)))
+svint16_t svasrd_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z)))
+svint8_t svasrd_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z)))
+svint32_t svasrd_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z)))
+svint64_t svasrd_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z)))
+svint16_t svasrd_z(svbool_t, svint16_t, uint64_t);
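+/* Usage sketch (illustrative comment): svasr shifts by an unsigned vector
+ * or scalar of the same element width, the _wide forms shift narrow
+ * elements by 64-bit amounts, and svasrd is the rounding shift used to
+ * implement signed division by a power of two:
+ *
+ *   svint32_t q = svasrd_x(pg, v, 4);   // v / 16, rounding toward zero
+ */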
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z)))
+svbool_t svbic_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m)))
+svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m)))
+svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m)))
+svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m)))
+svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m)))
+svint8_t svbic_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m)))
+svint32_t svbic_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m)))
+svint64_t svbic_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m)))
+svint16_t svbic_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x)))
+svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x)))
+svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x)))
+svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x)))
+svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x)))
+svint8_t svbic_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x)))
+svint32_t svbic_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x)))
+svint64_t svbic_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x)))
+svint16_t svbic_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z)))
+svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z)))
+svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z)))
+svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z)))
+svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z)))
+svint8_t svbic_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z)))
+svint32_t svbic_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z)))
+svint64_t svbic_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z)))
+svint16_t svbic_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m)))
+svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m)))
+svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m)))
+svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m)))
+svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m)))
+svint8_t svbic_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m)))
+svint32_t svbic_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m)))
+svint64_t svbic_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m)))
+svint16_t svbic_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x)))
+svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x)))
+svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x)))
+svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x)))
+svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x)))
+svint8_t svbic_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x)))
+svint32_t svbic_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x)))
+svint64_t svbic_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x)))
+svint16_t svbic_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z)))
+svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z)))
+svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z)))
+svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z)))
+svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z)))
+svint8_t svbic_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z)))
+svint32_t svbic_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z)))
+svint64_t svbic_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z)))
+svint16_t svbic_z(svbool_t, svint16_t, svint16_t);
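+/* Usage sketch (illustrative comment): svbic is "bit clear", computing
+ * op1 & ~op2, with the same scalar/vector operands and _m/_x/_z variants
+ * as svand above:
+ *
+ *   svuint8_t cleared = svbic_x(pg, bits, (uint8_t)0x80);  // drop top bit
+ */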
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m)))
+svbool_t svbrka_m(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z)))
+svbool_t svbrka_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m)))
+svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z)))
+svbool_t svbrkb_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z)))
+svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z)))
+svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z)))
+svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t);
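+/* Usage sketch (illustrative comment): the svbrk* family partitions a
+ * predicate around its first true element, the building block for
+ * break/continue in vectorized loops: svbrka keeps lanes up to and
+ * including the first true lane of its operand, svbrkb keeps lanes
+ * strictly before it, and svbrkpa/svbrkpb/svbrkn carry the break across
+ * predicate pairs:
+ *
+ *   svbool_t before_fault = svbrkb_z(pg, fault);  // lanes before 1st fault
+ */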
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m)))
+svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m)))
+svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m)))
+svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x)))
+svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x)))
+svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x)))
+svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z)))
+svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z)))
+svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z)))
+svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8)))
+uint8_t svclasta(svbool_t, uint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32)))
+uint32_t svclasta(svbool_t, uint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64)))
+uint64_t svclasta(svbool_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16)))
+uint16_t svclasta(svbool_t, uint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8)))
+int8_t svclasta(svbool_t, int8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64)))
+float64_t svclasta(svbool_t, float64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32)))
+float32_t svclasta(svbool_t, float32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16)))
+float16_t svclasta(svbool_t, float16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32)))
+int32_t svclasta(svbool_t, int32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64)))
+int64_t svclasta(svbool_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16)))
+int16_t svclasta(svbool_t, int16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8)))
+svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32)))
+svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64)))
+svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16)))
+svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8)))
+svint8_t svclasta(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64)))
+svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32)))
+svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16)))
+svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32)))
+svint32_t svclasta(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64)))
+svint64_t svclasta(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16)))
+svint16_t svclasta(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8)))
+uint8_t svclastb(svbool_t, uint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32)))
+uint32_t svclastb(svbool_t, uint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64)))
+uint64_t svclastb(svbool_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16)))
+uint16_t svclastb(svbool_t, uint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8)))
+int8_t svclastb(svbool_t, int8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64)))
+float64_t svclastb(svbool_t, float64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32)))
+float32_t svclastb(svbool_t, float32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16)))
+float16_t svclastb(svbool_t, float16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32)))
+int32_t svclastb(svbool_t, int32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64)))
+int64_t svclastb(svbool_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16)))
+int16_t svclastb(svbool_t, int16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8)))
+svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32)))
+svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64)))
+svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16)))
+svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8)))
+svint8_t svclastb(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64)))
+svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32)))
+svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16)))
+svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32)))
+svint32_t svclastb(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64)))
+svint64_t svclastb(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16)))
+svint16_t svclastb(svbool_t, svint16_t, svint16_t);
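+/* Usage sketch (illustrative comment): svclasta/svclastb extract the
+ * element after/at the last active lane; the scalar forms return their
+ * scalar argument when no lane is active, which makes them convenient for
+ * carrying a value out of a vectorized loop:
+ *
+ *   float32_t last = svclastb(pg, prev, v);  // prev if pg has no true lane
+ */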
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m)))
+svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m)))
+svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m)))
+svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m)))
+svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x)))
+svuint8_t svcls_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x)))
+svuint32_t svcls_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x)))
+svuint64_t svcls_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x)))
+svuint16_t svcls_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z)))
+svuint8_t svcls_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z)))
+svuint32_t svcls_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z)))
+svuint64_t svcls_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z)))
+svuint16_t svcls_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m)))
+svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m)))
+svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m)))
+svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m)))
+svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m)))
+svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m)))
+svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m)))
+svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m)))
+svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x)))
+svuint8_t svclz_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x)))
+svuint32_t svclz_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x)))
+svuint64_t svclz_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x)))
+svuint16_t svclz_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x)))
+svuint8_t svclz_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x)))
+svuint32_t svclz_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x)))
+svuint64_t svclz_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x)))
+svuint16_t svclz_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z)))
+svuint8_t svclz_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z)))
+svuint32_t svclz_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z)))
+svuint64_t svclz_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z)))
+svuint16_t svclz_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z)))
+svuint8_t svclz_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z)))
+svuint32_t svclz_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z)))
+svuint64_t svclz_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z)))
+svuint16_t svclz_z(svbool_t, svint16_t);
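+/* Usage sketch (illustrative comment): svcls counts leading sign bits and
+ * svclz counts leading zeros; both return unsigned vectors even for signed
+ * inputs, as the declarations above show:
+ *
+ *   svuint32_t lz = svclz_x(pg, u);   // u: svuint32_t or svint32_t
+ */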
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m)))
+svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m)))
+svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m)))
+svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x)))
+svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x)))
+svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x)))
+svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z)))
+svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z)))
+svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z)))
+svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32)))
+svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16)))
+svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t);
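+/* Usage sketch (illustrative comment): svcadd (declared further above) and
+ * svcmla operate on interleaved complex numbers; the trailing uint64_t is
+ * the rotation in degrees (90/270 for svcadd, 0/90/180/270 for svcmla),
+ * and the _lane forms take a lane index before the rotation. A complex
+ * multiply-accumulate is the usual pair of calls:
+ *
+ *   acc = svcmla_x(pg, acc, a, b, 0);
+ *   acc = svcmla_x(pg, acc, a, b, 90);
+ */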
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64)))
+svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32)))
+svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16)))
+svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8)))
+svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32)))
+svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64)))
+svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16)))
+svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8)))
+svbool_t svcmpeq(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32)))
+svbool_t svcmpeq(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64)))
+svbool_t svcmpeq(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16)))
+svbool_t svcmpeq(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8)))
+svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32)))
+svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64)))
+svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16)))
+svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8)))
+svbool_t svcmpeq(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32)))
+svbool_t svcmpeq(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64)))
+svbool_t svcmpeq(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16)))
+svbool_t svcmpeq(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64)))
+svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32)))
+svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16)))
+svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8)))
+svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32)))
+svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16)))
+svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8)))
+svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32)))
+svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16)))
+svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64)))
+svbool_t svcmpge(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32)))
+svbool_t svcmpge(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16)))
+svbool_t svcmpge(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8)))
+svbool_t svcmpge(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32)))
+svbool_t svcmpge(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64)))
+svbool_t svcmpge(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16)))
+svbool_t svcmpge(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8)))
+svbool_t svcmpge(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32)))
+svbool_t svcmpge(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64)))
+svbool_t svcmpge(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16)))
+svbool_t svcmpge(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8)))
+svbool_t svcmpge(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32)))
+svbool_t svcmpge(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64)))
+svbool_t svcmpge(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16)))
+svbool_t svcmpge(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64)))
+svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32)))
+svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16)))
+svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8)))
+svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32)))
+svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64)))
+svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16)))
+svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8)))
+svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32)))
+svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16)))
+svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8)))
+svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32)))
+svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16)))
+svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8)))
+svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32)))
+svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16)))
+svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8)))
+svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32)))
+svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16)))
+svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64)))
+svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32)))
+svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16)))
+svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8)))
+svbool_t svcmpgt(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32)))
+svbool_t svcmpgt(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64)))
+svbool_t svcmpgt(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16)))
+svbool_t svcmpgt(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8)))
+svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32)))
+svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64)))
+svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16)))
+svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8)))
+svbool_t svcmpgt(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32)))
+svbool_t svcmpgt(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64)))
+svbool_t svcmpgt(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16)))
+svbool_t svcmpgt(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64)))
+svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32)))
+svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16)))
+svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8)))
+svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32)))
+svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64)))
+svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16)))
+svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8)))
+svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32)))
+svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16)))
+svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8)))
+svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32)))
+svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16)))
+svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8)))
+svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32)))
+svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16)))
+svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8)))
+svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32)))
+svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16)))
+svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64)))
+svbool_t svcmple(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32)))
+svbool_t svcmple(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16)))
+svbool_t svcmple(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8)))
+svbool_t svcmple(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32)))
+svbool_t svcmple(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64)))
+svbool_t svcmple(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16)))
+svbool_t svcmple(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8)))
+svbool_t svcmple(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32)))
+svbool_t svcmple(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64)))
+svbool_t svcmple(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16)))
+svbool_t svcmple(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8)))
+svbool_t svcmple(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32)))
+svbool_t svcmple(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64)))
+svbool_t svcmple(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16)))
+svbool_t svcmple(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64)))
+svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32)))
+svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16)))
+svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8)))
+svbool_t svcmple(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32)))
+svbool_t svcmple(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64)))
+svbool_t svcmple(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16)))
+svbool_t svcmple(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8)))
+svbool_t svcmple_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32)))
+svbool_t svcmple_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16)))
+svbool_t svcmple_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8)))
+svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32)))
+svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16)))
+svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8)))
+svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32)))
+svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16)))
+svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8)))
+svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32)))
+svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16)))
+svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8)))
+svbool_t svcmplt(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32)))
+svbool_t svcmplt(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64)))
+svbool_t svcmplt(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16)))
+svbool_t svcmplt(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64)))
+svbool_t svcmplt(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32)))
+svbool_t svcmplt(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16)))
+svbool_t svcmplt(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8)))
+svbool_t svcmplt(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32)))
+svbool_t svcmplt(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64)))
+svbool_t svcmplt(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16)))
+svbool_t svcmplt(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8)))
+svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32)))
+svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64)))
+svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16)))
+svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8)))
+svbool_t svcmplt(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32)))
+svbool_t svcmplt(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64)))
+svbool_t svcmplt(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16)))
+svbool_t svcmplt(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64)))
+svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32)))
+svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16)))
+svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8)))
+svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32)))
+svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16)))
+svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8)))
+svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32)))
+svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16)))
+svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8)))
+svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32)))
+svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16)))
+svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8)))
+svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32)))
+svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16)))
+svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64)))
+svbool_t svcmpne(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32)))
+svbool_t svcmpne(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16)))
+svbool_t svcmpne(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8)))
+svbool_t svcmpne(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32)))
+svbool_t svcmpne(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64)))
+svbool_t svcmpne(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16)))
+svbool_t svcmpne(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8)))
+svbool_t svcmpne(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32)))
+svbool_t svcmpne(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64)))
+svbool_t svcmpne(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16)))
+svbool_t svcmpne(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8)))
+svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32)))
+svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64)))
+svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16)))
+svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8)))
+svbool_t svcmpne(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32)))
+svbool_t svcmpne(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64)))
+svbool_t svcmpne(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16)))
+svbool_t svcmpne(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64)))
+svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32)))
+svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16)))
+svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8)))
+svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32)))
+svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16)))
+svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8)))
+svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32)))
+svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16)))
+svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64)))
+svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32)))
+svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16)))
+svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64)))
+svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32)))
+svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16)))
+svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m)))
+svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m)))
+svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m)))
+svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m)))
+svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m)))
+svint8_t svcnot_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m)))
+svint32_t svcnot_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m)))
+svint64_t svcnot_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m)))
+svint16_t svcnot_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x)))
+svuint8_t svcnot_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x)))
+svuint32_t svcnot_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x)))
+svuint64_t svcnot_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x)))
+svuint16_t svcnot_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x)))
+svint8_t svcnot_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x)))
+svint32_t svcnot_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x)))
+svint64_t svcnot_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x)))
+svint16_t svcnot_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z)))
+svuint8_t svcnot_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z)))
+svuint32_t svcnot_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z)))
+svuint64_t svcnot_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z)))
+svuint16_t svcnot_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z)))
+svint8_t svcnot_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z)))
+svint32_t svcnot_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z)))
+svint64_t svcnot_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z)))
+svint16_t svcnot_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m)))
+svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m)))
+svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m)))
+svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m)))
+svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x)))
+svuint8_t svcnt_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x)))
+svuint32_t svcnt_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x)))
+svuint64_t svcnt_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x)))
+svuint16_t svcnt_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x)))
+svuint8_t svcnt_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x)))
+svuint64_t svcnt_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x)))
+svuint32_t svcnt_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x)))
+svuint16_t svcnt_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x)))
+svuint32_t svcnt_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x)))
+svuint64_t svcnt_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x)))
+svuint16_t svcnt_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z)))
+svuint8_t svcnt_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z)))
+svuint32_t svcnt_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z)))
+svuint64_t svcnt_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z)))
+svuint16_t svcnt_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z)))
+svuint8_t svcnt_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z)))
+svuint64_t svcnt_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z)))
+svuint32_t svcnt_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z)))
+svuint16_t svcnt_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z)))
+svuint32_t svcnt_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z)))
+svuint64_t svcnt_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z)))
+svuint16_t svcnt_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32)))
+svuint32_t svcompact(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64)))
+svuint64_t svcompact(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64)))
+svfloat64_t svcompact(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32)))
+svfloat32_t svcompact(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32)))
+svint32_t svcompact(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64)))
+svint64_t svcompact(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8)))
+svuint8x2_t svcreate2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32)))
+svuint32x2_t svcreate2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64)))
+svuint64x2_t svcreate2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16)))
+svuint16x2_t svcreate2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8)))
+svint8x2_t svcreate2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64)))
+svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32)))
+svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16)))
+svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32)))
+svint32x2_t svcreate2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64)))
+svint64x2_t svcreate2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16)))
+svint16x2_t svcreate2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8)))
+svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32)))
+svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64)))
+svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16)))
+svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8)))
+svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64)))
+svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32)))
+svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16)))
+svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32)))
+svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64)))
+svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16)))
+svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8)))
+svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32)))
+svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64)))
+svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16)))
+svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8)))
+svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64)))
+svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32)))
+svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16)))
+svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32)))
+svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64)))
+svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16)))
+svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m)))
+svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x)))
+svfloat16_t svcvt_f16_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z)))
+svfloat16_t svcvt_f16_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m)))
+svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x)))
+svfloat32_t svcvt_f32_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z)))
+svfloat32_t svcvt_f32_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m)))
+svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x)))
+svfloat64_t svcvt_f64_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z)))
+svfloat64_t svcvt_f64_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m)))
+svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x)))
+svint16_t svcvt_s16_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z)))
+svint16_t svcvt_s16_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m)))
+svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x)))
+svint32_t svcvt_s32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z)))
+svint32_t svcvt_s32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m)))
+svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x)))
+svint64_t svcvt_s64_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z)))
+svint64_t svcvt_s64_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m)))
+svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x)))
+svuint16_t svcvt_u16_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z)))
+svuint16_t svcvt_u16_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m)))
+svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x)))
+svuint32_t svcvt_u32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z)))
+svuint32_t svcvt_u32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m)))
+svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x)))
+svuint64_t svcvt_u64_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z)))
+svuint64_t svcvt_u64_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m)))
+svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m)))
+svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m)))
+svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x)))
+svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x)))
+svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x)))
+svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z)))
+svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z)))
+svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z)))
+svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m)))
+svint32_t svdiv_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m)))
+svint64_t svdiv_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x)))
+svint32_t svdiv_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x)))
+svint64_t svdiv_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z)))
+svint32_t svdiv_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z)))
+svint64_t svdiv_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m)))
+svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m)))
+svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x)))
+svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x)))
+svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z)))
+svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z)))
+svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m)))
+svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m)))
+svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m)))
+svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x)))
+svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x)))
+svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x)))
+svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z)))
+svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z)))
+svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z)))
+svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m)))
+svint32_t svdiv_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m)))
+svint64_t svdiv_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x)))
+svint32_t svdiv_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x)))
+svint64_t svdiv_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z)))
+svint32_t svdiv_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z)))
+svint64_t svdiv_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m)))
+svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m)))
+svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x)))
+svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x)))
+svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z)))
+svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z)))
+svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m)))
+svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m)))
+svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m)))
+svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x)))
+svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x)))
+svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x)))
+svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z)))
+svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z)))
+svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z)))
+svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m)))
+svint32_t svdivr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m)))
+svint64_t svdivr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x)))
+svint32_t svdivr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x)))
+svint64_t svdivr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z)))
+svint32_t svdivr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z)))
+svint64_t svdivr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m)))
+svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m)))
+svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x)))
+svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x)))
+svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z)))
+svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z)))
+svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m)))
+svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m)))
+svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m)))
+svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x)))
+svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x)))
+svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x)))
+svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z)))
+svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z)))
+svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z)))
+svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m)))
+svint32_t svdivr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m)))
+svint64_t svdivr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x)))
+svint32_t svdivr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x)))
+svint64_t svdivr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z)))
+svint32_t svdivr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z)))
+svint64_t svdivr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m)))
+svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m)))
+svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x)))
+svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x)))
+svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z)))
+svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z)))
+svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32)))
+svint32_t svdot(svint32_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64)))
+svint64_t svdot(svint64_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32)))
+svuint32_t svdot(svuint32_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64)))
+svuint64_t svdot(svuint64_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32)))
+svint32_t svdot(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64)))
+svint64_t svdot(svint64_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32)))
+svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64)))
+svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32)))
+svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64)))
+svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32)))
+svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64)))
+svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8)))
+svuint8_t svdup_u8(uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32)))
+svuint32_t svdup_u32(uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64)))
+svuint64_t svdup_u64(uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16)))
+svuint16_t svdup_u16(uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8)))
+svint8_t svdup_s8(int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64)))
+svfloat64_t svdup_f64(float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32)))
+svfloat32_t svdup_f32(float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16)))
+svfloat16_t svdup_f16(float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32)))
+svint32_t svdup_s32(int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64)))
+svint64_t svdup_s64(int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16)))
+svint16_t svdup_s16(int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m)))
+svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m)))
+svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m)))
+svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m)))
+svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m)))
+svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m)))
+svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m)))
+svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m)))
+svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m)))
+svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m)))
+svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m)))
+svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8)))
+svbool_t svdup_b8(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32)))
+svbool_t svdup_b32(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64)))
+svbool_t svdup_b64(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16)))
+svbool_t svdup_b16(bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x)))
+svuint8_t svdup_u8_x(svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x)))
+svuint32_t svdup_u32_x(svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x)))
+svuint64_t svdup_u64_x(svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x)))
+svuint16_t svdup_u16_x(svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x)))
+svint8_t svdup_s8_x(svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x)))
+svfloat64_t svdup_f64_x(svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x)))
+svfloat32_t svdup_f32_x(svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x)))
+svfloat16_t svdup_f16_x(svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x)))
+svint32_t svdup_s32_x(svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x)))
+svint64_t svdup_s64_x(svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x)))
+svint16_t svdup_s16_x(svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z)))
+svuint8_t svdup_u8_z(svbool_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z)))
+svuint32_t svdup_u32_z(svbool_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z)))
+svuint64_t svdup_u64_z(svbool_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z)))
+svuint16_t svdup_u16_z(svbool_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z)))
+svint8_t svdup_s8_z(svbool_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z)))
+svfloat64_t svdup_f64_z(svbool_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z)))
+svfloat32_t svdup_f32_z(svbool_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z)))
+svfloat16_t svdup_f16_z(svbool_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z)))
+svint32_t svdup_s32_z(svbool_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z)))
+svint64_t svdup_s64_z(svbool_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z)))
+svint16_t svdup_s16_z(svbool_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8)))
+svuint8_t svdup_lane(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32)))
+svuint32_t svdup_lane(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64)))
+svuint64_t svdup_lane(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16)))
+svuint16_t svdup_lane(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8)))
+svint8_t svdup_lane(svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64)))
+svfloat64_t svdup_lane(svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32)))
+svfloat32_t svdup_lane(svfloat32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16)))
+svfloat16_t svdup_lane(svfloat16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32)))
+svint32_t svdup_lane(svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
+svint64_t svdup_lane(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
+svint16_t svdup_lane(svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
+svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
+svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16)))
+svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32)))
+svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32)))
+svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32)))
+svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64)))
+svuint64_t svdupq_u64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))
+svfloat64_t svdupq_f64(float64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))
+svint64_t svdupq_s64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
+svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
+svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))
+svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))
+svbool_t svdupq_b32(bool, bool, bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))
+svbool_t svdupq_b64(bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
+svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))
+svuint8_t svdupq_lane(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))
+svuint32_t svdupq_lane(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64)))
+svuint64_t svdupq_lane(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16)))
+svuint16_t svdupq_lane(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8)))
+svint8_t svdupq_lane(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64)))
+svfloat64_t svdupq_lane(svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32)))
+svfloat32_t svdupq_lane(svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16)))
+svfloat16_t svdupq_lane(svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32)))
+svint32_t svdupq_lane(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64)))
+svint64_t svdupq_lane(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16)))
+svint16_t svdupq_lane(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z)))
+svbool_t sveor_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m)))
+svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m)))
+svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m)))
+svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m)))
+svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m)))
+svint8_t sveor_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m)))
+svint32_t sveor_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m)))
+svint64_t sveor_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m)))
+svint16_t sveor_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x)))
+svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x)))
+svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x)))
+svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x)))
+svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x)))
+svint8_t sveor_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x)))
+svint32_t sveor_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x)))
+svint64_t sveor_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x)))
+svint16_t sveor_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z)))
+svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z)))
+svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z)))
+svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z)))
+svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z)))
+svint8_t sveor_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z)))
+svint32_t sveor_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z)))
+svint64_t sveor_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z)))
+svint16_t sveor_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m)))
+svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m)))
+svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m)))
+svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m)))
+svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m)))
+svint8_t sveor_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m)))
+svint32_t sveor_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m)))
+svint64_t sveor_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m)))
+svint16_t sveor_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x)))
+svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x)))
+svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x)))
+svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x)))
+svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x)))
+svint8_t sveor_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x)))
+svint32_t sveor_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x)))
+svint64_t sveor_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x)))
+svint16_t sveor_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z)))
+svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z)))
+svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z)))
+svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z)))
+svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z)))
+svint8_t sveor_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z)))
+svint32_t sveor_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z)))
+svint64_t sveor_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z)))
+svint16_t sveor_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8)))
+uint8_t sveorv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32)))
+uint32_t sveorv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64)))
+uint64_t sveorv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16)))
+uint16_t sveorv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8)))
+int8_t sveorv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32)))
+int32_t sveorv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64)))
+int64_t sveorv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16)))
+int16_t sveorv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64)))
+svfloat64_t svexpa(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32)))
+svfloat32_t svexpa(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16)))
+svfloat16_t svexpa(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8)))
+svuint8_t svext(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32)))
+svuint32_t svext(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64)))
+svuint64_t svext(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16)))
+svuint16_t svext(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8)))
+svint8_t svext(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64)))
+svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32)))
+svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16)))
+svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32)))
+svint32_t svext(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64)))
+svint64_t svext(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16)))
+svint16_t svext(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m)))
+svint32_t svextb_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m)))
+svint64_t svextb_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m)))
+svint16_t svextb_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x)))
+svint32_t svextb_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x)))
+svint64_t svextb_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x)))
+svint16_t svextb_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z)))
+svint32_t svextb_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z)))
+svint64_t svextb_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z)))
+svint16_t svextb_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m)))
+svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m)))
+svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m)))
+svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x)))
+svuint32_t svextb_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x)))
+svuint64_t svextb_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x)))
+svuint16_t svextb_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z)))
+svuint32_t svextb_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z)))
+svuint64_t svextb_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z)))
+svuint16_t svextb_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m)))
+svint32_t svexth_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m)))
+svint64_t svexth_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x)))
+svint32_t svexth_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x)))
+svint64_t svexth_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z)))
+svint32_t svexth_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z)))
+svint64_t svexth_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m)))
+svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m)))
+svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x)))
+svuint32_t svexth_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x)))
+svuint64_t svexth_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z)))
+svuint32_t svexth_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z)))
+svuint64_t svexth_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m)))
+svint64_t svextw_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x)))
+svint64_t svextw_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z)))
+svint64_t svextw_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m)))
+svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x)))
+svuint64_t svextw_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z)))
+svuint64_t svextw_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8)))
+svuint8_t svget2(svuint8x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32)))
+svuint32_t svget2(svuint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64)))
+svuint64_t svget2(svuint64x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16)))
+svuint16_t svget2(svuint16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8)))
+svint8_t svget2(svint8x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64)))
+svfloat64_t svget2(svfloat64x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32)))
+svfloat32_t svget2(svfloat32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16)))
+svfloat16_t svget2(svfloat16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32)))
+svint32_t svget2(svint32x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64)))
+svint64_t svget2(svint64x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16)))
+svint16_t svget2(svint16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8)))
+svuint8_t svget3(svuint8x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32)))
+svuint32_t svget3(svuint32x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64)))
+svuint64_t svget3(svuint64x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16)))
+svuint16_t svget3(svuint16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8)))
+svint8_t svget3(svint8x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64)))
+svfloat64_t svget3(svfloat64x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32)))
+svfloat32_t svget3(svfloat32x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16)))
+svfloat16_t svget3(svfloat16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32)))
+svint32_t svget3(svint32x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64)))
+svint64_t svget3(svint64x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16)))
+svint16_t svget3(svint16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8)))
+svuint8_t svget4(svuint8x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32)))
+svuint32_t svget4(svuint32x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64)))
+svuint64_t svget4(svuint64x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16)))
+svuint16_t svget4(svuint16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8)))
+svint8_t svget4(svint8x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64)))
+svfloat64_t svget4(svfloat64x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32)))
+svfloat32_t svget4(svfloat32x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16)))
+svfloat16_t svget4(svfloat16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32)))
+svint32_t svget4(svint32x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64)))
+svint64_t svget4(svint64x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16)))
+svint16_t svget4(svint16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8)))
+svuint8_t svinsr(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32)))
+svuint32_t svinsr(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64)))
+svuint64_t svinsr(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16)))
+svuint16_t svinsr(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8)))
+svint8_t svinsr(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64)))
+svfloat64_t svinsr(svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32)))
+svfloat32_t svinsr(svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16)))
+svfloat16_t svinsr(svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32)))
+svint32_t svinsr(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64)))
+svint64_t svinsr(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16)))
+svint16_t svinsr(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8)))
+uint8_t svlasta(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32)))
+uint32_t svlasta(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64)))
+uint64_t svlasta(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16)))
+uint16_t svlasta(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8)))
+int8_t svlasta(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64)))
+float64_t svlasta(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32)))
+float32_t svlasta(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16)))
+float16_t svlasta(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32)))
+int32_t svlasta(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64)))
+int64_t svlasta(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16)))
+int16_t svlasta(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8)))
+uint8_t svlastb(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32)))
+uint32_t svlastb(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64)))
+uint64_t svlastb(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16)))
+uint16_t svlastb(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8)))
+int8_t svlastb(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64)))
+float64_t svlastb(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32)))
+float32_t svlastb(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16)))
+float16_t svlastb(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32)))
+int32_t svlastb(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64)))
+int64_t svlastb(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16)))
+int16_t svlastb(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8)))
+svuint8_t svld1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32)))
+svuint32_t svld1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64)))
+svuint64_t svld1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16)))
+svuint16_t svld1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8)))
+svint8_t svld1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64)))
+svfloat64_t svld1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32)))
+svfloat32_t svld1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16)))
+svfloat16_t svld1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32)))
+svint32_t svld1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64)))
+svint64_t svld1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16)))
+svint16_t svld1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32)))
+svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64)))
+svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64)))
+svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32)))
+svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32)))
+svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64)))
+svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32)))
+svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64)))
+svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64)))
+svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32)))
+svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32)))
+svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64)))
+svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32)))
+svuint32_t svld1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64)))
+svuint64_t svld1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64)))
+svfloat64_t svld1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32)))
+svfloat32_t svld1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32)))
+svint32_t svld1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64)))
+svint64_t svld1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32)))
+svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32)))
+svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32)))
+svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32)))
+svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32)))
+svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32)))
+svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64)))
+svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64)))
+svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64)))
+svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64)))
+svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64)))
+svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64)))
+svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32)))
+svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32)))
+svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32)))
+svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32)))
+svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32)))
+svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32)))
+svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64)))
+svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64)))
+svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64)))
+svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64)))
+svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64)))
+svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64)))
+svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8)))
+svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32)))
+svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64)))
+svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16)))
+svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8)))
+svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64)))
+svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32)))
+svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16)))
+svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32)))
+svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64)))
+svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16)))
+svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8)))
+svuint8_t svld1rq(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32)))
+svuint32_t svld1rq(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64)))
+svuint64_t svld1rq(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16)))
+svuint16_t svld1rq(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8)))
+svint8_t svld1rq(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64)))
+svfloat64_t svld1rq(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32)))
+svfloat32_t svld1rq(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16)))
+svfloat16_t svld1rq(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32)))
+svint32_t svld1rq(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64)))
+svint64_t svld1rq(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16)))
+svint16_t svld1rq(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32)))
+svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64)))
+svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32)))
+svint32_t svld1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64)))
+svint64_t svld1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32)))
+svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32)))
+svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64)))
+svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64)))
+svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32)))
+svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64)))
+svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32)))
+svint32_t svld1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64)))
+svint64_t svld1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32)))
+svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32)))
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64)))
+svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64)))
+svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32)))
+svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32)))
+svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64)))
+svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64)))
+svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64)))
+svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64)))
+svint64_t svld1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64)))
+svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64)))
+svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64)))
+svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64)))
+svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32)))
+svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64)))
+svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32)))
+svint32_t svld1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64)))
+svint64_t svld1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32)))
+svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32)))
+svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64)))
+svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64)))
+svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32)))
+svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64)))
+svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32)))
+svint32_t svld1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64)))
+svint64_t svld1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32)))
+svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32)))
+svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64)))
+svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64)))
+svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32)))
+svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32)))
+svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64)))
+svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64)))
+svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64)))
+svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64)))
+svint64_t svld1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64)))
+svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64)))
+svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64)))
+svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64)))
+svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8)))
+svuint8x2_t svld2(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32)))
+svuint32x2_t svld2(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64)))
+svuint64x2_t svld2(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16)))
+svuint16x2_t svld2(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8)))
+svint8x2_t svld2(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64)))
+svfloat64x2_t svld2(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32)))
+svfloat32x2_t svld2(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16)))
+svfloat16x2_t svld2(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32)))
+svint32x2_t svld2(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64)))
+svint64x2_t svld2(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16)))
+svint16x2_t svld2(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8)))
+svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32)))
+svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64)))
+svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16)))
+svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8)))
+svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64)))
+svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32)))
+svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16)))
+svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32)))
+svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64)))
+svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16)))
+svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8)))
+svuint8x3_t svld3(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32)))
+svuint32x3_t svld3(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64)))
+svuint64x3_t svld3(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16)))
+svuint16x3_t svld3(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8)))
+svint8x3_t svld3(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64)))
+svfloat64x3_t svld3(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32)))
+svfloat32x3_t svld3(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16)))
+svfloat16x3_t svld3(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32)))
+svint32x3_t svld3(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64)))
+svint64x3_t svld3(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16)))
+svint16x3_t svld3(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8)))
+svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32)))
+svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64)))
+svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16)))
+svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8)))
+svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64)))
+svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32)))
+svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16)))
+svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32)))
+svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64)))
+svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16)))
+svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8)))
+svuint8x4_t svld4(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32)))
+svuint32x4_t svld4(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64)))
+svuint64x4_t svld4(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16)))
+svuint16x4_t svld4(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8)))
+svint8x4_t svld4(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64)))
+svfloat64x4_t svld4(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32)))
+svfloat32x4_t svld4(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16)))
+svfloat16x4_t svld4(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32)))
+svint32x4_t svld4(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64)))
+svint64x4_t svld4(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16)))
+svint16x4_t svld4(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8)))
+svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32)))
+svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64)))
+svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16)))
+svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8)))
+svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64)))
+svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32)))
+svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16)))
+svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32)))
+svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64)))
+svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16)))
+svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8)))
+svuint8_t svldff1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32)))
+svuint32_t svldff1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64)))
+svuint64_t svldff1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16)))
+svuint16_t svldff1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8)))
+svint8_t svldff1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64)))
+svfloat64_t svldff1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32)))
+svfloat32_t svldff1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16)))
+svfloat16_t svldff1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32)))
+svint32_t svldff1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64)))
+svint64_t svldff1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16)))
+svint16_t svldff1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32)))
+svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64)))
+svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64)))
+svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32)))
+svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32)))
+svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64)))
+svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32)))
+svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64)))
+svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64)))
+svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32)))
+svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32)))
+svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64)))
+svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32)))
+svuint32_t svldff1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64)))
+svuint64_t svldff1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64)))
+svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32)))
+svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32)))
+svint32_t svldff1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64)))
+svint64_t svldff1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32)))
+svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32)))
+svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32)))
+svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32)))
+svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32)))
+svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32)))
+svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64)))
+svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64)))
+svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64)))
+svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64)))
+svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64)))
+svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64)))
+svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32)))
+svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32)))
+svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32)))
+svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32)))
+svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32)))
+svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32)))
+svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64)))
+svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64)))
+svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64)))
+svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64)))
+svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64)))
+svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64)))
+svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8)))
+svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32)))
+svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64)))
+svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16)))
+svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8)))
+svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64)))
+svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32)))
+svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16)))
+svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32)))
+svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64)))
+svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16)))
+svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32)))
+svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64)))
+svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32)))
+svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64)))
+svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32)))
+svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32)))
+svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64)))
+svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64)))
+svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32)))
+svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64)))
+svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32)))
+svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64)))
+svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32)))
+svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32)))
+svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64)))
+svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64)))
+svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32)))
+svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32)))
+svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64)))
+svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64)))
+svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64)))
+svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64)))
+svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64)))
+svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64)))
+svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64)))
+svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64)))
+svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32)))
+svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64)))
+svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32)))
+svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64)))
+svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32)))
+svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32)))
+svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64)))
+svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64)))
+svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32)))
+svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64)))
+svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32)))
+svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64)))
+svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32)))
+svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32)))
+svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64)))
+svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64)))
+svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32)))
+svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32)))
+svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64)))
+svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64)))
+svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64)))
+svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64)))
+svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64)))
+svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64)))
+svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64)))
+svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64)))
+svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8)))
+svuint8_t svldnf1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32)))
+svuint32_t svldnf1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64)))
+svuint64_t svldnf1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16)))
+svuint16_t svldnf1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8)))
+svint8_t svldnf1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64)))
+svfloat64_t svldnf1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32)))
+svfloat32_t svldnf1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16)))
+svfloat16_t svldnf1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32)))
+svint32_t svldnf1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64)))
+svint64_t svldnf1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16)))
+svint16_t svldnf1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8)))
+svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32)))
+svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64)))
+svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16)))
+svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8)))
+svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64)))
+svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32)))
+svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16)))
+svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32)))
+svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64)))
+svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16)))
+svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8)))
+svuint8_t svldnt1(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32)))
+svuint32_t svldnt1(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64)))
+svuint64_t svldnt1(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16)))
+svuint16_t svldnt1(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8)))
+svint8_t svldnt1(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64)))
+svfloat64_t svldnt1(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32)))
+svfloat32_t svldnt1(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16)))
+svfloat16_t svldnt1(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32)))
+svint32_t svldnt1(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64)))
+svint64_t svldnt1(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16)))
+svint16_t svldnt1(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8)))
+svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32)))
+svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64)))
+svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16)))
+svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8)))
+svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64)))
+svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32)))
+svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16)))
+svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32)))
+svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64)))
+svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16)))
+svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8)))
+uint64_t svlen(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32)))
+uint64_t svlen(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64)))
+uint64_t svlen(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16)))
+uint64_t svlen(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8)))
+uint64_t svlen(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64)))
+uint64_t svlen(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32)))
+uint64_t svlen(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16)))
+uint64_t svlen(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32)))
+uint64_t svlen(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64)))
+uint64_t svlen(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16)))
+uint64_t svlen(svint16_t);
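+// Illustrative sketch: svlen returns the number of elements the argument
+// type holds at the current vector length, e.g. 4 for svfloat32_t at a
+// 128-bit VL.  Hypothetical usage:
+//   uint64_t lanes = svlen(svdup_f32(0.0f));  // elements per f32 vector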
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m)))
+svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m)))
+svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m)))
+svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m)))
+svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m)))
+svint8_t svlsl_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m)))
+svint32_t svlsl_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m)))
+svint64_t svlsl_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m)))
+svint16_t svlsl_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x)))
+svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x)))
+svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x)))
+svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x)))
+svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x)))
+svint8_t svlsl_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x)))
+svint32_t svlsl_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x)))
+svint64_t svlsl_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x)))
+svint16_t svlsl_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z)))
+svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z)))
+svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z)))
+svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z)))
+svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z)))
+svint8_t svlsl_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z)))
+svint32_t svlsl_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z)))
+svint64_t svlsl_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z)))
+svint16_t svlsl_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m)))
+svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m)))
+svuint32_t svlsl_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m)))
+svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m)))
+svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m)))
+svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m)))
+svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m)))
+svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m)))
+svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x)))
+svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x)))
+svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x)))
+svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x)))
+svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x)))
+svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x)))
+svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x)))
+svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x)))
+svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z)))
+svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z)))
+svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z)))
+svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z)))
+svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z)))
+svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z)))
+svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z)))
+svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z)))
+svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m)))
+svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m)))
+svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m)))
+svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m)))
+svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m)))
+svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m)))
+svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x)))
+svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x)))
+svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x)))
+svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x)))
+svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x)))
+svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x)))
+svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z)))
+svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z)))
+svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z)))
+svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z)))
+svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z)))
+svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z)))
+svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m)))
+svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m)))
+svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m)))
+svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m)))
+svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m)))
+svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m)))
+svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x)))
+svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x)))
+svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x)))
+svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x)))
+svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x)))
+svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x)))
+svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z)))
+svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z)))
+svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z)))
+svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z)))
+svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z)))
+svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z)))
+svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t);
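+// Illustrative sketch: svlsl is the predicated logical shift left.  The
+// suffixes follow the usual ACLE convention: _m merges inactive lanes from
+// the first data operand, _z zeroes them, and _x leaves them unspecified.
+// The _wide forms shift 8/16/32-bit elements by 64-bit shift amounts.
+// Hypothetical usage, assuming predicate pg and svuint32_t x:
+//   svuint32_t r = svlsl_z(pg, x, 3u);  // x << 3 in active lanes, else 0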
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m)))
+svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m)))
+svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m)))
+svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m)))
+svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x)))
+svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x)))
+svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x)))
+svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x)))
+svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z)))
+svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z)))
+svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z)))
+svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z)))
+svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m)))
+svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m)))
+svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m)))
+svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m)))
+svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x)))
+svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x)))
+svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x)))
+svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x)))
+svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z)))
+svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z)))
+svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z)))
+svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z)))
+svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m)))
+svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m)))
+svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m)))
+svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x)))
+svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x)))
+svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x)))
+svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z)))
+svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z)))
+svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z)))
+svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m)))
+svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m)))
+svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m)))
+svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x)))
+svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x)))
+svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x)))
+svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z)))
+svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z)))
+svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z)))
+svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t);
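+// Illustrative note: svlsr is the logical (unsigned) right shift, so only
+// unsigned element types are declared; the arithmetic right shift for
+// signed elements is svasr.  Hypothetical usage:
+//   svuint16_t r = svlsr_x(pg, x, 1u);  // unsigned x >> 1 in active lanes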
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m)))
+svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m)))
+svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m)))
+svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x)))
+svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x)))
+svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x)))
+svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z)))
+svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z)))
+svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z)))
+svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m)))
+svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m)))
+svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m)))
+svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m)))
+svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m)))
+svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m)))
+svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m)))
+svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m)))
+svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x)))
+svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x)))
+svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x)))
+svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x)))
+svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x)))
+svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x)))
+svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x)))
+svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x)))
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z)))
+svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z)))
+svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z)))
+svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z)))
+svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z)))
+svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z)))
+svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z)))
+svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z)))
+svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m)))
+svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m)))
+svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m)))
+svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x)))
+svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x)))
+svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x)))
+svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z)))
+svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z)))
+svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z)))
+svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m)))
+svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m)))
+svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m)))
+svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m)))
+svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m)))
+svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m)))
+svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m)))
+svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m)))
+svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x)))
+svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x)))
+svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x)))
+svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x)))
+svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x)))
+svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x)))
+svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x)))
+svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x)))
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z)))
+svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z)))
+svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z)))
+svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z)))
+svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z)))
+svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z)))
+svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z)))
+svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z)))
+svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t);
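+// Illustrative sketch: svmad computes op1 * op2 + op3 per active lane (the
+// multiply-add form), whereas svmla (further below) accumulates
+// op1 + op2 * op3.  Hypothetical usage with svfloat64_t a, b, c:
+//   svfloat64_t r = svmad_m(pg, a, b, c);  // a*b + c; inactive lanes keep a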
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m)))
+svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m)))
+svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m)))
+svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x)))
+svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x)))
+svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x)))
+svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z)))
+svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z)))
+svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z)))
+svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m)))
+svint8_t svmax_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m)))
+svint32_t svmax_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m)))
+svint64_t svmax_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m)))
+svint16_t svmax_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x)))
+svint8_t svmax_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x)))
+svint32_t svmax_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x)))
+svint64_t svmax_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x)))
+svint16_t svmax_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z)))
+svint8_t svmax_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z)))
+svint32_t svmax_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z)))
+svint64_t svmax_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z)))
+svint16_t svmax_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m)))
+svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m)))
+svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m)))
+svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m)))
+svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x)))
+svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x)))
+svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x)))
+svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x)))
+svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z)))
+svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z)))
+svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z)))
+svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z)))
+svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m)))
+svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m)))
+svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m)))
+svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x)))
+svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x)))
+svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x)))
+svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z)))
+svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z)))
+svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z)))
+svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m)))
+svint8_t svmax_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m)))
+svint32_t svmax_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m)))
+svint64_t svmax_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m)))
+svint16_t svmax_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x)))
+svint8_t svmax_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x)))
+svint32_t svmax_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x)))
+svint64_t svmax_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x)))
+svint16_t svmax_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z)))
+svint8_t svmax_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z)))
+svint32_t svmax_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z)))
+svint64_t svmax_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z)))
+svint16_t svmax_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m)))
+svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m)))
+svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m)))
+svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m)))
+svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x)))
+svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x)))
+svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x)))
+svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x)))
+svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z)))
+svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z)))
+svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z)))
+svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z)))
+svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t);
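+// Illustrative sketch: svmax is the element-wise maximum.  Hypothetical
+// usage, assuming svint32_t a:
+//   svint32_t r = svmax_x(pg, a, 0);  // clamp negative lanes up to zero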
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m)))
+svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m)))
+svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m)))
+svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x)))
+svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x)))
+svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x)))
+svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z)))
+svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z)))
+svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z)))
+svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m)))
+svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m)))
+svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m)))
+svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x)))
+svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x)))
+svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x)))
+svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z)))
+svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z)))
+svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z)))
+svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64)))
+float64_t svmaxnmv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32)))
+float32_t svmaxnmv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16)))
+float16_t svmaxnmv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64)))
+float64_t svmaxv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32)))
+float32_t svmaxv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16)))
+float16_t svmaxv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8)))
+int8_t svmaxv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32)))
+int32_t svmaxv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64)))
+int64_t svmaxv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16)))
+int16_t svmaxv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8)))
+uint8_t svmaxv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32)))
+uint32_t svmaxv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64)))
+uint64_t svmaxv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16)))
+uint16_t svmaxv(svbool_t, svuint16_t);
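+// Illustrative note: svmaxnm follows IEEE 754 maxNum semantics (a quiet NaN
+// operand yields the other operand), while svmax propagates NaNs.  svmaxv
+// and svmaxnmv reduce the active lanes of a vector to a single scalar
+// maximum.  Hypothetical usage, assuming svfloat32_t v:
+//   float32_t m = svmaxv(svptrue_b32(), v);  // horizontal max of v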
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m)))
+svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m)))
+svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m)))
+svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x)))
+svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x)))
+svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x)))
+svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z)))
+svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z)))
+svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z)))
+svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m)))
+svint8_t svmin_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m)))
+svint32_t svmin_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m)))
+svint64_t svmin_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m)))
+svint16_t svmin_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x)))
+svint8_t svmin_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x)))
+svint32_t svmin_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x)))
+svint64_t svmin_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x)))
+svint16_t svmin_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z)))
+svint8_t svmin_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z)))
+svint32_t svmin_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z)))
+svint64_t svmin_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z)))
+svint16_t svmin_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m)))
+svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m)))
+svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m)))
+svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m)))
+svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x)))
+svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x)))
+svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x)))
+svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x)))
+svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z)))
+svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z)))
+svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z)))
+svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z)))
+svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m)))
+svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m)))
+svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m)))
+svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x)))
+svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x)))
+svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x)))
+svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z)))
+svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z)))
+svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z)))
+svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m)))
+svint8_t svmin_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m)))
+svint32_t svmin_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m)))
+svint64_t svmin_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m)))
+svint16_t svmin_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x)))
+svint8_t svmin_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x)))
+svint32_t svmin_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x)))
+svint64_t svmin_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x)))
+svint16_t svmin_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z)))
+svint8_t svmin_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z)))
+svint32_t svmin_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z)))
+svint64_t svmin_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z)))
+svint16_t svmin_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m)))
+svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m)))
+svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m)))
+svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m)))
+svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x)))
+svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x)))
+svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x)))
+svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x)))
+svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z)))
+svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z)))
+svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z)))
+svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z)))
+svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m)))
+svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m)))
+svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m)))
+svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x)))
+svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x)))
+svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x)))
+svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z)))
+svfloat64_t svminnm_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z)))
+svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z)))
+svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m)))
+svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m)))
+svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m)))
+svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x)))
+svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x)))
+svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x)))
+svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z)))
+svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z)))
+svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z)))
+svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64)))
+float64_t svminnmv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32)))
+float32_t svminnmv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16)))
+float16_t svminnmv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64)))
+float64_t svminv(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32)))
+float32_t svminv(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16)))
+float16_t svminv(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8)))
+int8_t svminv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32)))
+int32_t svminv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64)))
+int64_t svminv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16)))
+int16_t svminv(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8)))
+uint8_t svminv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32)))
+uint32_t svminv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64)))
+uint64_t svminv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16)))
+uint16_t svminv(svbool_t, svuint16_t);
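+// Illustrative note: svmin, svminnm, svminv and svminnmv mirror the svmax
+// family above with minimum semantics; the same _m/_x/_z predication and
+// NaN-handling rules apply.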
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m)))
+svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m)))
+svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m)))
+svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x)))
+svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x)))
+svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x)))
+svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z)))
+svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z)))
+svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z)))
+svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m)))
+svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m)))
+svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m)))
+svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m)))
+svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m)))
+svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m)))
+svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m)))
+svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m)))
+svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x)))
+svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x)))
+svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x)))
+svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x)))
+svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x)))
+svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x)))
+svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x)))
+svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x)))
+svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z)))
+svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z)))
+svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z)))
+svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z)))
+svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z)))
+svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z)))
+svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z)))
+svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z)))
+svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m)))
+svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m)))
+svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m)))
+svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x)))
+svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x)))
+svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x)))
+svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z)))
+svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z)))
+svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z)))
+svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m)))
+svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m)))
+svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m)))
+svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m)))
+svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m)))
+svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m)))
+svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m)))
+svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m)))
+svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x)))
+svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x)))
+svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x)))
+svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x)))
+svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x)))
+svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x)))
+svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x)))
+svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x)))
+svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z)))
+svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z)))
+svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z)))
+svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z)))
+svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z)))
+svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z)))
+svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z)))
+svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z)))
+svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64)))
+svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32)))
+svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16)))
+svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
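+// Overload note: the svmla_* declarations above follow the ACLE multiply-add
+// semantics (addend first, result = op1 + op2 * op3 for active lanes); the
+// _m/_x/_z suffixes select merging, don't-care, and zeroing predication.
+// A minimal usage sketch (pg, acc, a, b are hypothetical values):
+//   svfloat32_t r = svmla_m(pg, acc, a, b);   // acc + a * b; inactive lanes keep acc
+//   svfloat32_t s = svmla_lane(acc, a, b, 0); // acc + a * b[0]; lane index must be constant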
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m)))
+svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m)))
+svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m)))
+svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x)))
+svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x)))
+svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x)))
+svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z)))
+svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z)))
+svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z)))
+svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m)))
+svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m)))
+svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m)))
+svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m)))
+svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m)))
+svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m)))
+svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m)))
+svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m)))
+svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x)))
+svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x)))
+svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x)))
+svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x)))
+svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x)))
+svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x)))
+svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x)))
+svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x)))
+svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z)))
+svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z)))
+svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z)))
+svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z)))
+svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z)))
+svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z)))
+svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z)))
+svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z)))
+svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m)))
+svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m)))
+svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m)))
+svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x)))
+svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x)))
+svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x)))
+svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z)))
+svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z)))
+svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z)))
+svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m)))
+svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m)))
+svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m)))
+svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m)))
+svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m)))
+svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m)))
+svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m)))
+svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m)))
+svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x)))
+svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x)))
+svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x)))
+svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x)))
+svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x)))
+svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x)))
+svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x)))
+svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x)))
+svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z)))
+svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z)))
+svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z)))
+svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z)))
+svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z)))
+svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z)))
+svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z)))
+svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z)))
+svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64)))
+svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32)))
+svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16)))
+svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t);
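+// The svmls_* overloads mirror svmla_* with the product subtracted
+// (result = op1 - op2 * op3 for active lanes), e.g. a hedged sketch:
+//   svfloat64_t r = svmls_x(pg, acc, a, b);  // acc - a * b; inactive lanes
+//                                            // are don't-care under _x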
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z)))
+svbool_t svmov_z(svbool_t, svbool_t);
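+// svmov_z moves a predicate under a governing predicate; the result is
+// effectively op AND pg (only the zeroing form exists for predicates).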
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m)))
+svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m)))
+svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m)))
+svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x)))
+svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x)))
+svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x)))
+svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z)))
+svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z)))
+svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z)))
+svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m)))
+svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m)))
+svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m)))
+svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m)))
+svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m)))
+svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m)))
+svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m)))
+svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m)))
+svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x)))
+svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x)))
+svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x)))
+svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x)))
+svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x)))
+svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x)))
+svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x)))
+svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x)))
+svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z)))
+svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z)))
+svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z)))
+svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z)))
+svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z)))
+svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z)))
+svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z)))
+svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z)))
+svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m)))
+svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m)))
+svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m)))
+svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x)))
+svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x)))
+svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x)))
+svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z)))
+svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z)))
+svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z)))
+svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m)))
+svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m)))
+svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m)))
+svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m)))
+svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m)))
+svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m)))
+svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m)))
+svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m)))
+svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x)))
+svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x)))
+svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x)))
+svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x)))
+svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x)))
+svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x)))
+svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x)))
+svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x)))
+svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z)))
+svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z)))
+svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z)))
+svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z)))
+svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z)))
+svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z)))
+svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z)))
+svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z)))
+svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t);
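+// svmsb_* uses the multiplicand-first MSB form: for active lanes the result
+// is op3 - op1 * op2 (contrast svmls_*, where the product is subtracted from
+// op1); the _m/_x/_z predication suffixes behave as elsewhere.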
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m)))
+svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m)))
+svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m)))
+svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x)))
+svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x)))
+svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x)))
+svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z)))
+svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z)))
+svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z)))
+svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m)))
+svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m)))
+svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m)))
+svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m)))
+svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m)))
+svint8_t svmul_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m)))
+svint32_t svmul_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m)))
+svint64_t svmul_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m)))
+svint16_t svmul_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x)))
+svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x)))
+svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x)))
+svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x)))
+svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x)))
+svint8_t svmul_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x)))
+svint32_t svmul_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x)))
+svint64_t svmul_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x)))
+svint16_t svmul_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z)))
+svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z)))
+svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z)))
+svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z)))
+svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z)))
+svint8_t svmul_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z)))
+svint32_t svmul_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z)))
+svint64_t svmul_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z)))
+svint16_t svmul_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m)))
+svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m)))
+svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m)))
+svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x)))
+svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x)))
+svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x)))
+svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z)))
+svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z)))
+svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z)))
+svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m)))
+svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m)))
+svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m)))
+svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m)))
+svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m)))
+svint8_t svmul_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m)))
+svint32_t svmul_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m)))
+svint64_t svmul_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m)))
+svint16_t svmul_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x)))
+svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x)))
+svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x)))
+svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x)))
+svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x)))
+svint8_t svmul_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x)))
+svint32_t svmul_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x)))
+svint64_t svmul_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x)))
+svint16_t svmul_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z)))
+svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z)))
+svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z)))
+svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z)))
+svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z)))
+svint8_t svmul_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z)))
+svint32_t svmul_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z)))
+svint64_t svmul_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z)))
+svint16_t svmul_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64)))
+svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32)))
+svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16)))
+svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t);
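+// svmul_* is a plain predicated multiply (result = op1 * op2); svmul_lane
+// multiplies by a single indexed element, e.g. a hedged sketch:
+//   svfloat32_t r = svmul_lane(a, b, 1);  // a * b[1], unpredicated; the
+//                                         // lane index must be a constant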
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m)))
+svint8_t svmulh_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m)))
+svint32_t svmulh_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m)))
+svint64_t svmulh_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m)))
+svint16_t svmulh_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x)))
+svint8_t svmulh_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x)))
+svint32_t svmulh_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x)))
+svint64_t svmulh_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x)))
+svint16_t svmulh_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z)))
+svint8_t svmulh_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z)))
+svint32_t svmulh_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z)))
+svint64_t svmulh_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z)))
+svint16_t svmulh_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m)))
+svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m)))
+svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m)))
+svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m)))
+svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x)))
+svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x)))
+svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x)))
+svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x)))
+svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z)))
+svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z)))
+svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z)))
+svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z)))
+svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m)))
+svint8_t svmulh_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m)))
+svint32_t svmulh_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m)))
+svint64_t svmulh_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m)))
+svint16_t svmulh_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x)))
+svint8_t svmulh_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x)))
+svint32_t svmulh_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x)))
+svint64_t svmulh_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x)))
+svint16_t svmulh_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z)))
+svint8_t svmulh_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z)))
+svint32_t svmulh_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z)))
+svint64_t svmulh_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z)))
+svint16_t svmulh_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m)))
+svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m)))
+svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m)))
+svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m)))
+svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x)))
+svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x)))
+svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x)))
+svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x)))
+svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z)))
+svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z)))
+svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z)))
+svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z)))
+svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t);
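+// svmulh_* returns the most significant half of the doubled-width product
+// (the "high half" of an N-bit by N-bit multiply), with signed and unsigned
+// variants selected by the element type of the overload.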
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m)))
+svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m)))
+svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m)))
+svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x)))
+svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x)))
+svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x)))
+svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z)))
+svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z)))
+svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z)))
+svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m)))
+svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m)))
+svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m)))
+svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x)))
+svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x)))
+svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x)))
+svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z)))
+svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z)))
+svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z)))
+svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t);
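+// svmulx_* (floating-point only) maps to FMULX, which behaves like an
+// ordinary multiply except that 0 * infinity yields 2.0 with the
+// appropriate sign rather than a NaN.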
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z)))
+svbool_t svnand_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m)))
+svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m)))
+svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m)))
+svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x)))
+svfloat64_t svneg_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x)))
+svfloat32_t svneg_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x)))
+svfloat16_t svneg_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z)))
+svfloat64_t svneg_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z)))
+svfloat32_t svneg_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z)))
+svfloat16_t svneg_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m)))
+svint8_t svneg_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m)))
+svint32_t svneg_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m)))
+svint64_t svneg_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m)))
+svint16_t svneg_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x)))
+svint8_t svneg_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x)))
+svint32_t svneg_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x)))
+svint64_t svneg_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x)))
+svint16_t svneg_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z)))
+svint8_t svneg_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z)))
+svint32_t svneg_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z)))
+svint64_t svneg_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z)))
+svint16_t svneg_z(svbool_t, svint16_t);
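+// Note the argument order of the unary _m overloads above: svneg_m takes
+// (inactive, pg, op), so inactive lanes are filled from a separate vector
+// rather than from the operand, e.g. a hedged sketch:
+//   svint32_t r = svneg_m(fallback, pg, x);  // -x where pg is true,
+//                                            // fallback elsewhere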
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m)))
+svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m)))
+svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m)))
+svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x)))
+svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x)))
+svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x)))
+svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z)))
+svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z)))
+svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z)))
+svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m)))
+svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m)))
+svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m)))
+svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x)))
+svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x)))
+svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x)))
+svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z)))
+svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z)))
+svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z)))
+svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m)))
+svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m)))
+svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m)))
+svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x)))
+svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x)))
+svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x)))
+svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z)))
+svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z)))
+svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z)))
+svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m)))
+svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m)))
+svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m)))
+svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x)))
+svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x)))
+svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x)))
+svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z)))
+svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z)))
+svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z)))
+svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m)))
+svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m)))
+svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m)))
+svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x)))
+svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x)))
+svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x)))
+svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z)))
+svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z)))
+svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z)))
+svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m)))
+svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m)))
+svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m)))
+svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x)))
+svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x)))
+svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x)))
+svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z)))
+svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z)))
+svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z)))
+svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m)))
+svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m)))
+svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m)))
+svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x)))
+svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x)))
+svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x)))
+svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z)))
+svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z)))
+svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z)))
+svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m)))
+svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m)))
+svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m)))
+svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x)))
+svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x)))
+svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x)))
+svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z)))
+svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z)))
+svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z)))
+svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t);
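+// The svnmad_*/svnmla_*/svnmls_*/svnmsb_* overloads above are the negated
+// floating-point fused forms; per the ACLE naming, svnmla negates the
+// svmla result (-(op1 + op2 * op3)) and svnmad/svnmls/svnmsb apply the
+// corresponding sign change to the svmad/svmls/svmsb forms.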
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z)))
+svbool_t svnor_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z)))
+svbool_t svnot_z(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m)))
+svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m)))
+svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m)))
+svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m)))
+svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m)))
+svint8_t svnot_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m)))
+svint32_t svnot_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m)))
+svint64_t svnot_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m)))
+svint16_t svnot_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x)))
+svuint8_t svnot_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x)))
+svuint32_t svnot_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x)))
+svuint64_t svnot_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x)))
+svuint16_t svnot_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x)))
+svint8_t svnot_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x)))
+svint32_t svnot_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x)))
+svint64_t svnot_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x)))
+svint16_t svnot_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z)))
+svuint8_t svnot_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z)))
+svuint32_t svnot_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z)))
+svuint64_t svnot_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z)))
+svuint16_t svnot_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z)))
+svint8_t svnot_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z)))
+svint32_t svnot_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z)))
+svint64_t svnot_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z)))
+svint16_t svnot_z(svbool_t, svint16_t);
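
The _m/_x/_z suffixes above select how inactive (predicate-false) lanes are handled: _m takes an explicit "inactive" vector as its first argument, _x leaves inactive lanes unspecified (usually cheapest), and _z zeroes them. A minimal sketch of the three svnot variants, assuming a compiler with SVE enabled (e.g. -march=armv8-a+sve) and the overloaded forms declared here:

#include <arm_sve.h>

svuint32_t invert_variants(svbool_t pg, svuint32_t v, svuint32_t fallback) {
  svuint32_t m = svnot_m(fallback, pg, v); /* inactive lanes copied from 'fallback' */
  svuint32_t x = svnot_x(pg, v);           /* inactive lanes unspecified            */
  (void)m; (void)x;
  return svnot_z(pg, v);                   /* inactive lanes forced to zero         */
}
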
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z)))
+svbool_t svorn_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z)))
+svbool_t svorr_z(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m)))
+svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m)))
+svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m)))
+svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m)))
+svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m)))
+svint8_t svorr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m)))
+svint32_t svorr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m)))
+svint64_t svorr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m)))
+svint16_t svorr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x)))
+svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x)))
+svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x)))
+svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x)))
+svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x)))
+svint8_t svorr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x)))
+svint32_t svorr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x)))
+svint64_t svorr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x)))
+svint16_t svorr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z)))
+svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z)))
+svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z)))
+svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z)))
+svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z)))
+svint8_t svorr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z)))
+svint32_t svorr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z)))
+svint64_t svorr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z)))
+svint16_t svorr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m)))
+svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m)))
+svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m)))
+svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m)))
+svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m)))
+svint8_t svorr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m)))
+svint32_t svorr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m)))
+svint64_t svorr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m)))
+svint16_t svorr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x)))
+svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x)))
+svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x)))
+svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x)))
+svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x)))
+svint8_t svorr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x)))
+svint32_t svorr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x)))
+svint64_t svorr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x)))
+svint16_t svorr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z)))
+svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z)))
+svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z)))
+svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z)))
+svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z)))
+svint8_t svorr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z)))
+svint32_t svorr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z)))
+svint64_t svorr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z)))
+svint16_t svorr_z(svbool_t, svint16_t, svint16_t);
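
The vector-vector and vector-scalar forms share one overloaded name; passing a scalar second operand resolves to the _n builtin. A minimal sketch, under the same SVE assumptions:

#include <arm_sve.h>

svuint8_t set_bits(svbool_t pg, svuint8_t bytes, svuint8_t mask) {
  svuint8_t a = svorr_x(pg, bytes, mask);          /* resolves to svorr_u8_x   */
  svuint8_t b = svorr_x(pg, bytes, (uint8_t)0x80); /* resolves to svorr_n_u8_x */
  return svorr_z(pg, a, b);                        /* inactive lanes zeroed    */
}
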
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8)))
+uint8_t svorv(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32)))
+uint32_t svorv(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64)))
+uint64_t svorv(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16)))
+uint16_t svorv(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8)))
+int8_t svorv(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32)))
+int32_t svorv(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64)))
+int64_t svorv(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16)))
+int16_t svorv(svbool_t, svint16_t);
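
Unlike the lane-wise operations, svorv is a horizontal reduction: it ORs the active lanes together and returns a scalar of the element type. A sketch of an "any bit set?" test:

#include <arm_sve.h>
#include <stdbool.h>

bool any_bit_set(svbool_t pg, svuint8_t v) {
  return svorv(pg, v) != 0; /* OR-reduce active lanes into one uint8_t */
}
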
+#define svpfalse(...) __builtin_sve_svpfalse_b(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b)))
+svbool_t svpfirst(svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base)))
+void svprfb_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base)))
+void svprfb_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset)))
+void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset)))
+void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset)))
+void svprfb_gather_offset(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset)))
+void svprfb_gather_offset(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset)))
+void svprfb_gather_offset(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset)))
+void svprfb_gather_offset(svbool_t, void const *, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base)))
+void svprfd_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base)))
+void svprfd_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index)))
+void svprfd_gather_index(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index)))
+void svprfd_gather_index(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index)))
+void svprfd_gather_index(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index)))
+void svprfd_gather_index(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index)))
+void svprfd_gather_index(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index)))
+void svprfd_gather_index(svbool_t, void const *, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base)))
+void svprfh_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base)))
+void svprfh_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index)))
+void svprfh_gather_index(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index)))
+void svprfh_gather_index(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index)))
+void svprfh_gather_index(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index)))
+void svprfh_gather_index(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index)))
+void svprfh_gather_index(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index)))
+void svprfh_gather_index(svbool_t, void const *, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base)))
+void svprfw_gather(svbool_t, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base)))
+void svprfw_gather(svbool_t, svuint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index)))
+void svprfw_gather_index(svbool_t, svuint32_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index)))
+void svprfw_gather_index(svbool_t, svuint64_t, int64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index)))
+void svprfw_gather_index(svbool_t, void const *, svint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index)))
+void svprfw_gather_index(svbool_t, void const *, svuint32_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index)))
+void svprfw_gather_index(svbool_t, void const *, svint64_t, sv_prfop);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index)))
+void svprfw_gather_index(svbool_t, void const *, svuint64_t, sv_prfop);
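
The svprf*_gather forms issue one software prefetch per active lane, with the sv_prfop argument choosing the cache level and keep/stream policy. A hedged sketch; the SV_PLDL1KEEP enumerator is taken from the ACLE specification rather than from the declarations above:

#include <arm_sve.h>

void prefetch_scattered(svbool_t pg, const double *base, svint64_t byte_offsets) {
  /* Prefetch base + byte_offsets[i] for each active lane into L1, keep policy. */
  svprfb_gather_offset(pg, base, byte_offsets, SV_PLDL1KEEP);
}
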
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8)))
+svint8_t svqadd(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32)))
+svint32_t svqadd(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64)))
+svint64_t svqadd(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16)))
+svint16_t svqadd(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8)))
+svuint8_t svqadd(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32)))
+svuint32_t svqadd(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64)))
+svuint64_t svqadd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16)))
+svuint16_t svqadd(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8)))
+svint8_t svqadd(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32)))
+svint32_t svqadd(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64)))
+svint64_t svqadd(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16)))
+svint16_t svqadd(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8)))
+svuint8_t svqadd(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32)))
+svuint32_t svqadd(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64)))
+svuint64_t svqadd(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16)))
+svuint16_t svqadd(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32)))
+int32_t svqdecb(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64)))
+int64_t svqdecb(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32)))
+uint32_t svqdecb(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64)))
+uint64_t svqdecb(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32)))
+int32_t svqdecb_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64)))
+int64_t svqdecb_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32)))
+uint32_t svqdecb_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64)))
+uint64_t svqdecb_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32)))
+int32_t svqdecd(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64)))
+int64_t svqdecd(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32)))
+uint32_t svqdecd(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64)))
+uint64_t svqdecd(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64)))
+svint64_t svqdecd(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64)))
+svuint64_t svqdecd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32)))
+int32_t svqdecd_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64)))
+int64_t svqdecd_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32)))
+uint32_t svqdecd_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64)))
+uint64_t svqdecd_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64)))
+svint64_t svqdecd_pat(svint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64)))
+svuint64_t svqdecd_pat(svuint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32)))
+int32_t svqdech(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64)))
+int64_t svqdech(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32)))
+uint32_t svqdech(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64)))
+uint64_t svqdech(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16)))
+svint16_t svqdech(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16)))
+svuint16_t svqdech(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32)))
+int32_t svqdech_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64)))
+int64_t svqdech_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32)))
+uint32_t svqdech_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64)))
+uint64_t svqdech_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16)))
+svint16_t svqdech_pat(svint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16)))
+svuint16_t svqdech_pat(svuint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8)))
+int32_t svqdecp_b8(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32)))
+int32_t svqdecp_b32(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64)))
+int32_t svqdecp_b64(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16)))
+int32_t svqdecp_b16(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8)))
+int64_t svqdecp_b8(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32)))
+int64_t svqdecp_b32(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64)))
+int64_t svqdecp_b64(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16)))
+int64_t svqdecp_b16(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8)))
+uint32_t svqdecp_b8(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32)))
+uint32_t svqdecp_b32(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64)))
+uint32_t svqdecp_b64(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16)))
+uint32_t svqdecp_b16(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8)))
+uint64_t svqdecp_b8(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32)))
+uint64_t svqdecp_b32(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64)))
+uint64_t svqdecp_b64(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16)))
+uint64_t svqdecp_b16(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32)))
+svint32_t svqdecp(svint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64)))
+svint64_t svqdecp(svint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16)))
+svint16_t svqdecp(svint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32)))
+svuint32_t svqdecp(svuint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64)))
+svuint64_t svqdecp(svuint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16)))
+svuint16_t svqdecp(svuint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32)))
+int32_t svqdecw(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64)))
+int64_t svqdecw(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32)))
+uint32_t svqdecw(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64)))
+uint64_t svqdecw(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32)))
+svint32_t svqdecw(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32)))
+svuint32_t svqdecw(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32)))
+int32_t svqdecw_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64)))
+int64_t svqdecw_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32)))
+uint32_t svqdecw_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64)))
+uint64_t svqdecw_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32)))
+svint32_t svqdecw_pat(svint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32)))
+svuint32_t svqdecw_pat(svuint32_t, sv_pattern, uint64_t);
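
The scalar svqdecp_bN forms subtract the number of active lanes of an N-bit-governed predicate from a counter, saturating instead of wrapping, which suits "elements remaining" bookkeeping in predicated loops. A minimal sketch:

#include <arm_sve.h>

int64_t consume(int64_t remaining, svbool_t active) {
  /* remaining -= number of active 32-bit lanes, saturating at the type's range */
  return svqdecp_b32(remaining, active);
}
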
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32)))
+int32_t svqincb(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64)))
+int64_t svqincb(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32)))
+uint32_t svqincb(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64)))
+uint64_t svqincb(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32)))
+int32_t svqincb_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64)))
+int64_t svqincb_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32)))
+uint32_t svqincb_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64)))
+uint64_t svqincb_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32)))
+int32_t svqincd(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64)))
+int64_t svqincd(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32)))
+uint32_t svqincd(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64)))
+uint64_t svqincd(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64)))
+svint64_t svqincd(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64)))
+svuint64_t svqincd(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32)))
+int32_t svqincd_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64)))
+int64_t svqincd_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32)))
+uint32_t svqincd_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64)))
+uint64_t svqincd_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64)))
+svint64_t svqincd_pat(svint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64)))
+svuint64_t svqincd_pat(svuint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32)))
+int32_t svqinch(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64)))
+int64_t svqinch(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32)))
+uint32_t svqinch(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64)))
+uint64_t svqinch(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16)))
+svint16_t svqinch(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16)))
+svuint16_t svqinch(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32)))
+int32_t svqinch_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64)))
+int64_t svqinch_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32)))
+uint32_t svqinch_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64)))
+uint64_t svqinch_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16)))
+svint16_t svqinch_pat(svint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16)))
+svuint16_t svqinch_pat(svuint16_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8)))
+int32_t svqincp_b8(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32)))
+int32_t svqincp_b32(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64)))
+int32_t svqincp_b64(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16)))
+int32_t svqincp_b16(int32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8)))
+int64_t svqincp_b8(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32)))
+int64_t svqincp_b32(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64)))
+int64_t svqincp_b64(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16)))
+int64_t svqincp_b16(int64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8)))
+uint32_t svqincp_b8(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32)))
+uint32_t svqincp_b32(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64)))
+uint32_t svqincp_b64(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16)))
+uint32_t svqincp_b16(uint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8)))
+uint64_t svqincp_b8(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32)))
+uint64_t svqincp_b32(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64)))
+uint64_t svqincp_b64(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16)))
+uint64_t svqincp_b16(uint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32)))
+svint32_t svqincp(svint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64)))
+svint64_t svqincp(svint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16)))
+svint16_t svqincp(svint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32)))
+svuint32_t svqincp(svuint32_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64)))
+svuint64_t svqincp(svuint64_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16)))
+svuint16_t svqincp(svuint16_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32)))
+int32_t svqincw(int32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64)))
+int64_t svqincw(int64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32)))
+uint32_t svqincw(uint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64)))
+uint64_t svqincw(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32)))
+svint32_t svqincw(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32)))
+svuint32_t svqincw(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32)))
+int32_t svqincw_pat(int32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64)))
+int64_t svqincw_pat(int64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32)))
+uint32_t svqincw_pat(uint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64)))
+uint64_t svqincw_pat(uint64_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32)))
+svint32_t svqincw_pat(svint32_t, sv_pattern, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32)))
+svuint32_t svqincw_pat(svuint32_t, sv_pattern, uint64_t);
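
The scalar svqinc* forms advance an index by a whole vector's worth of elements (svqincw: 32-bit lanes) with saturation, and the _pat variants scale by an sv_pattern subset of the vector. A sketch of the canonical strip-mined loop; svwhilelt_b32, svld1, svst1 and svmul_x are other arm_sve.h intrinsics assumed here:

#include <arm_sve.h>

void scale_f32(float *dst, const float *src, int64_t n) {
  for (int64_t i = 0; i < n; i = svqincw(i, 1)) {  /* i += number of 32-bit lanes */
    svbool_t pg = svwhilelt_b32(i, n);             /* mask off the loop tail      */
    svst1(pg, dst + i, svmul_x(pg, svld1(pg, src + i), 2.0f));
  }
}
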
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8)))
+svint8_t svqsub(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32)))
+svint32_t svqsub(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64)))
+svint64_t svqsub(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16)))
+svint16_t svqsub(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8)))
+svuint8_t svqsub(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32)))
+svuint32_t svqsub(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64)))
+svuint64_t svqsub(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16)))
+svuint16_t svqsub(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8)))
+svint8_t svqsub(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32)))
+svint32_t svqsub(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64)))
+svint64_t svqsub(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16)))
+svint16_t svqsub(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8)))
+svuint8_t svqsub(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32)))
+svuint32_t svqsub(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64)))
+svuint64_t svqsub(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16)))
+svuint16_t svqsub(svuint16_t, svuint16_t);
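
svqadd and svqsub are unpredicated saturating operations: results clamp to the element type's range rather than wrapping. A sketch of a saturating brightness adjustment on unsigned bytes:

#include <arm_sve.h>

svuint8_t adjust(svuint8_t pixels, uint8_t up, uint8_t down) {
  svuint8_t brighter = svqadd(pixels, up); /* clamps at 255 */
  return svqsub(brighter, down);           /* clamps at 0   */
}
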
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m)))
+svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m)))
+svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m)))
+svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m)))
+svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m)))
+svint8_t svrbit_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m)))
+svint32_t svrbit_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m)))
+svint64_t svrbit_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m)))
+svint16_t svrbit_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x)))
+svuint8_t svrbit_x(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x)))
+svuint32_t svrbit_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x)))
+svuint64_t svrbit_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x)))
+svuint16_t svrbit_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x)))
+svint8_t svrbit_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x)))
+svint32_t svrbit_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x)))
+svint64_t svrbit_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x)))
+svint16_t svrbit_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z)))
+svuint8_t svrbit_z(svbool_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z)))
+svuint32_t svrbit_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z)))
+svuint64_t svrbit_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z)))
+svuint16_t svrbit_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z)))
+svint8_t svrbit_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z)))
+svint32_t svrbit_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z)))
+svint64_t svrbit_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z)))
+svint16_t svrbit_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64)))
+svfloat64_t svrecpe(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32)))
+svfloat32_t svrecpe(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16)))
+svfloat16_t svrecpe(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64)))
+svfloat64_t svrecps(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32)))
+svfloat32_t svrecps(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16)))
+svfloat16_t svrecps(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m)))
+svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m)))
+svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m)))
+svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x)))
+svfloat64_t svrecpx_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x)))
+svfloat32_t svrecpx_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x)))
+svfloat16_t svrecpx_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z)))
+svfloat64_t svrecpx_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z)))
+svfloat32_t svrecpx_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z)))
+svfloat16_t svrecpx_z(svbool_t, svfloat16_t);
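
Per the ACLE spec (an assumption, not stated in these declarations), svrecpe yields a low-precision reciprocal estimate and svrecps computes the Newton-Raphson step term 2 - a*b, so each svrecps-plus-multiply roughly doubles the precision. A sketch, also assuming the svptrue_b32 and svmul_x intrinsics:

#include <arm_sve.h>

svfloat32_t fast_reciprocal(svfloat32_t d) {
  svbool_t pg = svptrue_b32();
  svfloat32_t x = svrecpe(d);            /* rough estimate of 1/d       */
  x = svmul_x(pg, x, svrecps(d, x));     /* x *= (2 - d*x): one NR step */
  x = svmul_x(pg, x, svrecps(d, x));     /* second step, near full f32  */
  return x;
}
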
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8)))
+svuint8_t svrev(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32)))
+svuint32_t svrev(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64)))
+svuint64_t svrev(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16)))
+svuint16_t svrev(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8)))
+svint8_t svrev(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64)))
+svfloat64_t svrev(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32)))
+svfloat32_t svrev(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16)))
+svfloat16_t svrev(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32)))
+svint32_t svrev(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64)))
+svint64_t svrev(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16)))
+svint16_t svrev(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m)))
+svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m)))
+svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m)))
+svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m)))
+svint32_t svrevb_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m)))
+svint64_t svrevb_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m)))
+svint16_t svrevb_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x)))
+svuint32_t svrevb_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x)))
+svuint64_t svrevb_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x)))
+svuint16_t svrevb_x(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x)))
+svint32_t svrevb_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x)))
+svint64_t svrevb_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x)))
+svint16_t svrevb_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z)))
+svuint32_t svrevb_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z)))
+svuint64_t svrevb_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z)))
+svuint16_t svrevb_z(svbool_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z)))
+svint32_t svrevb_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z)))
+svint64_t svrevb_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z)))
+svint16_t svrevb_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m)))
+svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m)))
+svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m)))
+svint32_t svrevh_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m)))
+svint64_t svrevh_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x)))
+svuint32_t svrevh_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x)))
+svuint64_t svrevh_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x)))
+svint32_t svrevh_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x)))
+svint64_t svrevh_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z)))
+svuint32_t svrevh_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z)))
+svuint64_t svrevh_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z)))
+svint32_t svrevh_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z)))
+svint64_t svrevh_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m)))
+svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m)))
+svint64_t svrevw_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x)))
+svuint64_t svrevw_x(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x)))
+svint64_t svrevw_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z)))
+svuint64_t svrevw_z(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z)))
+svint64_t svrevw_z(svbool_t, svint64_t);
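
svrev reverses the order of lanes across the whole vector, while svrevb/svrevh/svrevw reverse the bytes/halfwords/words inside each lane, so svrevb on 32-bit lanes is a per-element byte swap (endianness conversion). A minimal sketch:

#include <arm_sve.h>

svuint32_t bswap32_lanes(svbool_t pg, svuint32_t v) {
  return svrevb_x(pg, v); /* swap the 4 bytes within each 32-bit lane */
}

svuint32_t reverse_lanes(svuint32_t v) {
  return svrev(v);        /* lane 0 <-> lane N-1, and so on */
}
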
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m)))
+svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m)))
+svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m)))
+svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x)))
+svfloat64_t svrinta_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x)))
+svfloat32_t svrinta_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x)))
+svfloat16_t svrinta_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z)))
+svfloat64_t svrinta_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z)))
+svfloat32_t svrinta_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z)))
+svfloat16_t svrinta_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m)))
+svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m)))
+svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m)))
+svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x)))
+svfloat64_t svrinti_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x)))
+svfloat32_t svrinti_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x)))
+svfloat16_t svrinti_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z)))
+svfloat64_t svrinti_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z)))
+svfloat32_t svrinti_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z)))
+svfloat16_t svrinti_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m)))
+svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m)))
+svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m)))
+svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x)))
+svfloat64_t svrintm_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x)))
+svfloat32_t svrintm_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x)))
+svfloat16_t svrintm_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z)))
+svfloat64_t svrintm_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z)))
+svfloat32_t svrintm_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z)))
+svfloat16_t svrintm_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m)))
+svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m)))
+svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m)))
+svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x)))
+svfloat64_t svrintn_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x)))
+svfloat32_t svrintn_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x)))
+svfloat16_t svrintn_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z)))
+svfloat64_t svrintn_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z)))
+svfloat32_t svrintn_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z)))
+svfloat16_t svrintn_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m)))
+svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m)))
+svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m)))
+svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x)))
+svfloat64_t svrintp_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x)))
+svfloat32_t svrintp_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x)))
+svfloat16_t svrintp_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z)))
+svfloat64_t svrintp_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z)))
+svfloat32_t svrintp_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z)))
+svfloat16_t svrintp_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m)))
+svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m)))
+svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m)))
+svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x)))
+svfloat64_t svrintx_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x)))
+svfloat32_t svrintx_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x)))
+svfloat16_t svrintx_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z)))
+svfloat64_t svrintx_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z)))
+svfloat32_t svrintx_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z)))
+svfloat16_t svrintx_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m)))
+svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m)))
+svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m)))
+svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x)))
+svfloat64_t svrintz_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x)))
+svfloat32_t svrintz_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x)))
+svfloat16_t svrintz_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z)))
+svfloat64_t svrintz_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z)))
+svfloat32_t svrintz_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z)))
+svfloat16_t svrintz_z(svbool_t, svfloat16_t);
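
The svrint* families differ only in rounding mode, mirroring the FRINT instructions: ...a rounds ties away from zero, ...i uses the current FPCR mode, ...m rounds toward minus infinity, ...n rounds ties to even, ...p rounds toward plus infinity, ...x uses the current mode but raises inexact, and ...z rounds toward zero. A sketch contrasting floor and truncation (svsub_x is another arm_sve.h intrinsic assumed here):

#include <arm_sve.h>

svfloat64_t floor_vs_trunc(svbool_t pg, svfloat64_t v) {
  svfloat64_t flo = svrintm_x(pg, v); /* -2.5 -> -3.0 */
  svfloat64_t tru = svrintz_x(pg, v); /* -2.5 -> -2.0 */
  return svsub_x(pg, flo, tru);       /* nonzero where the modes disagree */
}
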
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64)))
+svfloat64_t svrsqrte(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32)))
+svfloat32_t svrsqrte(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16)))
+svfloat16_t svrsqrte(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64)))
+svfloat64_t svrsqrts(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32)))
+svfloat32_t svrsqrts(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16)))
+svfloat16_t svrsqrts(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m)))
+svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m)))
+svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m)))
+svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x)))
+svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x)))
+svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x)))
+svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z)))
+svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z)))
+svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z)))
+svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m)))
+svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m)))
+svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m)))
+svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x)))
+svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x)))
+svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x)))
+svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z)))
+svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z)))
+svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z)))
+svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t);
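
svscale multiplies each floating-point lane by 2^n for a per-lane (or broadcast _n) integer exponent, i.e. a vector ldexp; note from the signatures above that the f16 forms take int16_t exponents while f32/f64 take int32_t/int64_t. A minimal sketch:

#include <arm_sve.h>

svfloat32_t halve(svbool_t pg, svfloat32_t v) {
  return svscale_x(pg, v, -1); /* v * 2^-1; resolves to svscale_n_f32_x */
}
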
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b)))
+svbool_t svsel(svbool_t, svbool_t, svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8)))
+svuint8_t svsel(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32)))
+svuint32_t svsel(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64)))
+svuint64_t svsel(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16)))
+svuint16_t svsel(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8)))
+svint8_t svsel(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64)))
+svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32)))
+svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16)))
+svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32)))
+svint32_t svsel(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64)))
+svint64_t svsel(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16)))
+svint16_t svsel(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8)))
+svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32)))
+svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64)))
+svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16)))
+svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8)))
+svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64)))
+svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32)))
+svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16)))
+svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32)))
+svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64)))
+svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16)))
+svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8)))
+svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32)))
+svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64)))
+svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16)))
+svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8)))
+svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64)))
+svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32)))
+svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16)))
+svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32)))
+svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64)))
+svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16)))
+svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8)))
+svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32)))
+svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64)))
+svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16)))
+svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8)))
+svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64)))
+svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32)))
+svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16)))
+svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32)))
+svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64)))
+svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16)))
+svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8)))
+svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32)))
+svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64)))
+svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16)))
+svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8)))
+svint8_t svsplice(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64)))
+svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32)))
+svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16)))
+svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32)))
+svint32_t svsplice(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64)))
+svint64_t svsplice(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16)))
+svint16_t svsplice(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m)))
+svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m)))
+svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m)))
+svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x)))
+svfloat64_t svsqrt_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x)))
+svfloat32_t svsqrt_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x)))
+svfloat16_t svsqrt_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z)))
+svfloat64_t svsqrt_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z)))
+svfloat32_t svsqrt_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z)))
+svfloat16_t svsqrt_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8)))
+void svst1(svbool_t, uint8_t *, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32)))
+void svst1(svbool_t, uint32_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64)))
+void svst1(svbool_t, uint64_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16)))
+void svst1(svbool_t, uint16_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8)))
+void svst1(svbool_t, int8_t *, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64)))
+void svst1(svbool_t, float64_t *, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32)))
+void svst1(svbool_t, float32_t *, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16)))
+void svst1(svbool_t, float16_t *, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32)))
+void svst1(svbool_t, int32_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64)))
+void svst1(svbool_t, int64_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16)))
+void svst1(svbool_t, int16_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32)))
+void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64)))
+void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32)))
+void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64)))
+void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32)))
+void svst1_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64)))
+void svst1_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64)))
+void svst1_scatter(svbool_t, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32)))
+void svst1_scatter(svbool_t, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32)))
+void svst1_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64)))
+void svst1_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32)))
+void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32)))
+void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32)))
+void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32)))
+void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32)))
+void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32)))
+void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64)))
+void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64)))
+void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64)))
+void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64)))
+void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64)))
+void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64)))
+void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32)))
+void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32)))
+void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32)))
+void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32)))
+void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32)))
+void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32)))
+void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64)))
+void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64)))
+void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64)))
+void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64)))
+void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64)))
+void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64)))
+void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8)))
+void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32)))
+void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64)))
+void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16)))
+void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8)))
+void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64)))
+void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32)))
+void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16)))
+void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32)))
+void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64)))
+void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16)))
+void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32)))
+void svst1b(svbool_t, int8_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64)))
+void svst1b(svbool_t, int8_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16)))
+void svst1b(svbool_t, int8_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32)))
+void svst1b(svbool_t, uint8_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64)))
+void svst1b(svbool_t, uint8_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16)))
+void svst1b(svbool_t, uint8_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32)))
+void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64)))
+void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32)))
+void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64)))
+void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32)))
+void svst1b_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64)))
+void svst1b_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32)))
+void svst1b_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64)))
+void svst1b_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64)))
+void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64)))
+void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16)))
+void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16)))
+void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32)))
+void svst1h(svbool_t, int16_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64)))
+void svst1h(svbool_t, int16_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32)))
+void svst1h(svbool_t, uint16_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64)))
+void svst1h(svbool_t, uint16_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32)))
+void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64)))
+void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32)))
+void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64)))
+void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32)))
+void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64)))
+void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32)))
+void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64)))
+void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32)))
+void svst1h_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64)))
+void svst1h_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32)))
+void svst1h_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64)))
+void svst1h_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32)))
+void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32)))
+void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64)))
+void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64)))
+void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64)))
+void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64)))
+void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64)))
+void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32)))
+void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64)))
+void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32)))
+void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64)))
+void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64)))
+void svst1w(svbool_t, int32_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64)))
+void svst1w(svbool_t, uint32_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64)))
+void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64)))
+void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64)))
+void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64)))
+void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64)))
+void svst1w_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64)))
+void svst1w_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64)))
+void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64)))
+void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64)))
+void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64)))
+void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64)))
+void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64)))
+void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64)))
+void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64)))
+void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64)))
+void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64)))
+void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8)))
+void svst2(svbool_t, uint8_t *, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32)))
+void svst2(svbool_t, uint32_t *, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64)))
+void svst2(svbool_t, uint64_t *, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16)))
+void svst2(svbool_t, uint16_t *, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8)))
+void svst2(svbool_t, int8_t *, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64)))
+void svst2(svbool_t, float64_t *, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32)))
+void svst2(svbool_t, float32_t *, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16)))
+void svst2(svbool_t, float16_t *, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32)))
+void svst2(svbool_t, int32_t *, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64)))
+void svst2(svbool_t, int64_t *, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16)))
+void svst2(svbool_t, int16_t *, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8)))
+void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32)))
+void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64)))
+void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16)))
+void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8)))
+void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64)))
+void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32)))
+void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16)))
+void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32)))
+void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64)))
+void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16)))
+void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8)))
+void svst3(svbool_t, uint8_t *, svuint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32)))
+void svst3(svbool_t, uint32_t *, svuint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64)))
+void svst3(svbool_t, uint64_t *, svuint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16)))
+void svst3(svbool_t, uint16_t *, svuint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8)))
+void svst3(svbool_t, int8_t *, svint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64)))
+void svst3(svbool_t, float64_t *, svfloat64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32)))
+void svst3(svbool_t, float32_t *, svfloat32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16)))
+void svst3(svbool_t, float16_t *, svfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32)))
+void svst3(svbool_t, int32_t *, svint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64)))
+void svst3(svbool_t, int64_t *, svint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16)))
+void svst3(svbool_t, int16_t *, svint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8)))
+void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32)))
+void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64)))
+void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16)))
+void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8)))
+void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64)))
+void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32)))
+void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16)))
+void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32)))
+void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64)))
+void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16)))
+void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8)))
+void svst4(svbool_t, uint8_t *, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32)))
+void svst4(svbool_t, uint32_t *, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64)))
+void svst4(svbool_t, uint64_t *, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16)))
+void svst4(svbool_t, uint16_t *, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8)))
+void svst4(svbool_t, int8_t *, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64)))
+void svst4(svbool_t, float64_t *, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32)))
+void svst4(svbool_t, float32_t *, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16)))
+void svst4(svbool_t, float16_t *, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32)))
+void svst4(svbool_t, int32_t *, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64)))
+void svst4(svbool_t, int64_t *, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16)))
+void svst4(svbool_t, int16_t *, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8)))
+void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32)))
+void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64)))
+void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16)))
+void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8)))
+void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64)))
+void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32)))
+void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16)))
+void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32)))
+void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64)))
+void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16)))
+void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8)))
+void svstnt1(svbool_t, uint8_t *, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32)))
+void svstnt1(svbool_t, uint32_t *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64)))
+void svstnt1(svbool_t, uint64_t *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16)))
+void svstnt1(svbool_t, uint16_t *, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8)))
+void svstnt1(svbool_t, int8_t *, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64)))
+void svstnt1(svbool_t, float64_t *, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32)))
+void svstnt1(svbool_t, float32_t *, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16)))
+void svstnt1(svbool_t, float16_t *, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32)))
+void svstnt1(svbool_t, int32_t *, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64)))
+void svstnt1(svbool_t, int64_t *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16)))
+void svstnt1(svbool_t, int16_t *, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8)))
+void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32)))
+void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64)))
+void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16)))
+void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8)))
+void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64)))
+void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32)))
+void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16)))
+void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32)))
+void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64)))
+void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16)))
+void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m)))
+svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m)))
+svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m)))
+svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x)))
+svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x)))
+svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x)))
+svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z)))
+svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z)))
+svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z)))
+svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m)))
+svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m)))
+svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m)))
+svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m)))
+svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m)))
+svint8_t svsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m)))
+svint32_t svsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m)))
+svint64_t svsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m)))
+svint16_t svsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x)))
+svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x)))
+svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x)))
+svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x)))
+svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x)))
+svint8_t svsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x)))
+svint32_t svsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x)))
+svint64_t svsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x)))
+svint16_t svsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z)))
+svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z)))
+svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z)))
+svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z)))
+svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z)))
+svint8_t svsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z)))
+svint32_t svsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z)))
+svint64_t svsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z)))
+svint16_t svsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m)))
+svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m)))
+svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m)))
+svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x)))
+svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x)))
+svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x)))
+svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z)))
+svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z)))
+svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z)))
+svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m)))
+svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m)))
+svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m)))
+svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m)))
+svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m)))
+svint8_t svsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m)))
+svint32_t svsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m)))
+svint64_t svsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m)))
+svint16_t svsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x)))
+svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x)))
+svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x)))
+svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x)))
+svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x)))
+svint8_t svsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x)))
+svint32_t svsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x)))
+svint64_t svsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x)))
+svint16_t svsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z)))
+svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z)))
+svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z)))
+svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z)))
+svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z)))
+svint8_t svsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z)))
+svint32_t svsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z)))
+svint64_t svsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z)))
+svint16_t svsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m)))
+svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m)))
+svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m)))
+svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x)))
+svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x)))
+svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x)))
+svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z)))
+svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z)))
+svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z)))
+svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m)))
+svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m)))
+svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m)))
+svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m)))
+svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m)))
+svint8_t svsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m)))
+svint32_t svsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m)))
+svint64_t svsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m)))
+svint16_t svsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x)))
+svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x)))
+svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x)))
+svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x)))
+svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x)))
+svint8_t svsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x)))
+svint32_t svsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x)))
+svint64_t svsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x)))
+svint16_t svsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z)))
+svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z)))
+svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z)))
+svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z)))
+svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z)))
+svint8_t svsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z)))
+svint32_t svsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z)))
+svint64_t svsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z)))
+svint16_t svsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m)))
+svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m)))
+svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m)))
+svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x)))
+svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x)))
+svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x)))
+svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z)))
+svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z)))
+svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z)))
+svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m)))
+svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m)))
+svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m)))
+svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m)))
+svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m)))
+svint8_t svsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m)))
+svint32_t svsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m)))
+svint64_t svsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m)))
+svint16_t svsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x)))
+svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x)))
+svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x)))
+svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x)))
+svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x)))
+svint8_t svsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x)))
+svint32_t svsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x)))
+svint64_t svsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x)))
+svint16_t svsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z)))
+svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z)))
+svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z)))
+svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z)))
+svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z)))
+svint8_t svsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z)))
+svint32_t svsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z)))
+svint64_t svsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z)))
+svint16_t svsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8)))
+svuint8_t svtbl(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32)))
+svuint32_t svtbl(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64)))
+svuint64_t svtbl(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16)))
+svuint16_t svtbl(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8)))
+svint8_t svtbl(svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64)))
+svfloat64_t svtbl(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32)))
+svfloat32_t svtbl(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16)))
+svfloat16_t svtbl(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32)))
+svint32_t svtbl(svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64)))
+svint64_t svtbl(svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16)))
+svint16_t svtbl(svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64)))
+svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32)))
+svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16)))
+svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8)))
+svuint8_t svtrn1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32)))
+svuint32_t svtrn1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64)))
+svuint64_t svtrn1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16)))
+svuint16_t svtrn1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8)))
+svint8_t svtrn1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64)))
+svfloat64_t svtrn1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32)))
+svfloat32_t svtrn1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16)))
+svfloat16_t svtrn1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32)))
+svint32_t svtrn1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64)))
+svint64_t svtrn1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16)))
+svint16_t svtrn1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8)))
+svuint8_t svtrn2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32)))
+svuint32_t svtrn2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64)))
+svuint64_t svtrn2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16)))
+svuint16_t svtrn2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8)))
+svint8_t svtrn2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64)))
+svfloat64_t svtrn2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32)))
+svfloat32_t svtrn2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16)))
+svfloat16_t svtrn2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32)))
+svint32_t svtrn2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64)))
+svint64_t svtrn2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16)))
+svint16_t svtrn2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64)))
+svfloat64_t svtsmul(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32)))
+svfloat32_t svtsmul(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16)))
+svfloat16_t svtsmul(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64)))
+svfloat64_t svtssel(svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32)))
+svfloat32_t svtssel(svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16)))
+svfloat16_t svtssel(svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b)))
+svbool_t svunpkhi(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32)))
+svint32_t svunpkhi(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64)))
+svint64_t svunpkhi(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16)))
+svint16_t svunpkhi(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32)))
+svuint32_t svunpkhi(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64)))
+svuint64_t svunpkhi(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16)))
+svuint16_t svunpkhi(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b)))
+svbool_t svunpklo(svbool_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32)))
+svint32_t svunpklo(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64)))
+svint64_t svunpklo(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16)))
+svint16_t svunpklo(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32)))
+svuint32_t svunpklo(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64)))
+svuint64_t svunpklo(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16)))
+svuint16_t svunpklo(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8)))
+svuint8_t svuzp1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32)))
+svuint32_t svuzp1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64)))
+svuint64_t svuzp1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16)))
+svuint16_t svuzp1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8)))
+svint8_t svuzp1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64)))
+svfloat64_t svuzp1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32)))
+svfloat32_t svuzp1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16)))
+svfloat16_t svuzp1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32)))
+svint32_t svuzp1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64)))
+svint64_t svuzp1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16)))
+svint16_t svuzp1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8)))
+svuint8_t svuzp2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32)))
+svuint32_t svuzp2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64)))
+svuint64_t svuzp2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16)))
+svuint16_t svuzp2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8)))
+svint8_t svuzp2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64)))
+svfloat64_t svuzp2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32)))
+svfloat32_t svuzp2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16)))
+svfloat16_t svuzp2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32)))
+svint32_t svuzp2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64)))
+svint64_t svuzp2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16)))
+svint16_t svuzp2(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32)))
+svbool_t svwhilele_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32)))
+svbool_t svwhilele_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32)))
+svbool_t svwhilele_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32)))
+svbool_t svwhilele_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64)))
+svbool_t svwhilele_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64)))
+svbool_t svwhilele_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64)))
+svbool_t svwhilele_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64)))
+svbool_t svwhilele_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32)))
+svbool_t svwhilele_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32)))
+svbool_t svwhilele_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32)))
+svbool_t svwhilele_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32)))
+svbool_t svwhilele_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64)))
+svbool_t svwhilele_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64)))
+svbool_t svwhilele_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64)))
+svbool_t svwhilele_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64)))
+svbool_t svwhilele_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32)))
+svbool_t svwhilelt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32)))
+svbool_t svwhilelt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32)))
+svbool_t svwhilelt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32)))
+svbool_t svwhilelt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64)))
+svbool_t svwhilelt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64)))
+svbool_t svwhilelt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64)))
+svbool_t svwhilelt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64)))
+svbool_t svwhilelt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32)))
+svbool_t svwhilelt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32)))
+svbool_t svwhilelt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32)))
+svbool_t svwhilelt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32)))
+svbool_t svwhilelt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64)))
+svbool_t svwhilelt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64)))
+svbool_t svwhilelt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64)))
+svbool_t svwhilelt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64)))
+svbool_t svwhilelt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8)))
+svuint8_t svzip1(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32)))
+svuint32_t svzip1(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64)))
+svuint64_t svzip1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16)))
+svuint16_t svzip1(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8)))
+svint8_t svzip1(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64)))
+svfloat64_t svzip1(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32)))
+svfloat32_t svzip1(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16)))
+svfloat16_t svzip1(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32)))
+svint32_t svzip1(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64)))
+svint64_t svzip1(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16)))
+svint16_t svzip1(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8)))
+svuint8_t svzip2(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32)))
+svuint32_t svzip2(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64)))
+svuint64_t svzip2(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16)))
+svuint16_t svzip2(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8)))
+svint8_t svzip2(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64)))
+svfloat64_t svzip2(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32)))
+svfloat32_t svzip2(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16)))
+svfloat16_t svzip2(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32)))
+svint32_t svzip2(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64)))
+svint64_t svzip2(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16)))
+svint16_t svzip2(svint16_t, svint16_t);
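+/* Each __aio declaration above binds an overloaded intrinsic name to its
+ * type-suffixed builtin through __clang_arm_builtin_alias, so ordinary C
+ * overload resolution on the argument types selects the builtin. A minimal
+ * sketch, assuming an SVE-enabled target and vectors a, b of type
+ * svint16_t:
+ *
+ *   svint16_t r = svzip2(a, b);  // resolves to __builtin_sve_svzip2_s16
+ */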
+
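+/* The block below is only meaningful when the target implements the SVE2
+ * bit-permute extension; building with a flag such as
+ * -march=armv8-a+sve2-bitperm (an assumed example) predefines
+ * __ARM_FEATURE_SVE2_BITPERM. A minimal sketch, assuming svuint8_t data a
+ * and bit mask m:
+ *
+ *   svuint8_t d = svbdep(a, m);  // deposit low bits of a at set bits of m
+ *   svuint8_t e = svbext(a, m);  // extract bits of a at set bits of m
+ */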
+#if defined(__ARM_FEATURE_SVE2_BITPERM)
+#define svbdep_n_u8(...) __builtin_sve_svbdep_n_u8(__VA_ARGS__)
+#define svbdep_n_u32(...) __builtin_sve_svbdep_n_u32(__VA_ARGS__)
+#define svbdep_n_u64(...) __builtin_sve_svbdep_n_u64(__VA_ARGS__)
+#define svbdep_n_u16(...) __builtin_sve_svbdep_n_u16(__VA_ARGS__)
+#define svbdep_u8(...) __builtin_sve_svbdep_u8(__VA_ARGS__)
+#define svbdep_u32(...) __builtin_sve_svbdep_u32(__VA_ARGS__)
+#define svbdep_u64(...) __builtin_sve_svbdep_u64(__VA_ARGS__)
+#define svbdep_u16(...) __builtin_sve_svbdep_u16(__VA_ARGS__)
+#define svbext_n_u8(...) __builtin_sve_svbext_n_u8(__VA_ARGS__)
+#define svbext_n_u32(...) __builtin_sve_svbext_n_u32(__VA_ARGS__)
+#define svbext_n_u64(...) __builtin_sve_svbext_n_u64(__VA_ARGS__)
+#define svbext_n_u16(...) __builtin_sve_svbext_n_u16(__VA_ARGS__)
+#define svbext_u8(...) __builtin_sve_svbext_u8(__VA_ARGS__)
+#define svbext_u32(...) __builtin_sve_svbext_u32(__VA_ARGS__)
+#define svbext_u64(...) __builtin_sve_svbext_u64(__VA_ARGS__)
+#define svbext_u16(...) __builtin_sve_svbext_u16(__VA_ARGS__)
+#define svbgrp_n_u8(...) __builtin_sve_svbgrp_n_u8(__VA_ARGS__)
+#define svbgrp_n_u32(...) __builtin_sve_svbgrp_n_u32(__VA_ARGS__)
+#define svbgrp_n_u64(...) __builtin_sve_svbgrp_n_u64(__VA_ARGS__)
+#define svbgrp_n_u16(...) __builtin_sve_svbgrp_n_u16(__VA_ARGS__)
+#define svbgrp_u8(...) __builtin_sve_svbgrp_u8(__VA_ARGS__)
+#define svbgrp_u32(...) __builtin_sve_svbgrp_u32(__VA_ARGS__)
+#define svbgrp_u64(...) __builtin_sve_svbgrp_u64(__VA_ARGS__)
+#define svbgrp_u16(...) __builtin_sve_svbgrp_u16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8)))
+svuint8_t svbdep(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32)))
+svuint32_t svbdep(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64)))
+svuint64_t svbdep(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16)))
+svuint16_t svbdep(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8)))
+svuint8_t svbdep(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32)))
+svuint32_t svbdep(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64)))
+svuint64_t svbdep(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16)))
+svuint16_t svbdep(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8)))
+svuint8_t svbext(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32)))
+svuint32_t svbext(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64)))
+svuint64_t svbext(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16)))
+svuint16_t svbext(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8)))
+svuint8_t svbext(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32)))
+svuint32_t svbext(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64)))
+svuint64_t svbext(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16)))
+svuint16_t svbext(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8)))
+svuint8_t svbgrp(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32)))
+svuint32_t svbgrp(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64)))
+svuint64_t svbgrp(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16)))
+svuint16_t svbgrp(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8)))
+svuint8_t svbgrp(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32)))
+svuint32_t svbgrp(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64)))
+svuint64_t svbgrp(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16)))
+svuint16_t svbgrp(svuint16_t, svuint16_t);
+#endif  // __ARM_FEATURE_SVE2_BITPERM
+
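+/* The SVE2 wrappers below follow the scheme sv<op>[_n]_<type>[_<m|x|z>]:
+ * "_n" marks a vector-by-scalar operand form, the type suffix fixes the
+ * element type, and a trailing _m/_x/_z selects merging, don't-care, or
+ * zeroing predication. A minimal sketch, assuming an SVE2 target, an
+ * active predicate pg, and suitably typed vectors:
+ *
+ *   svint32_t  d = svaba_n_s32(acc, a, 3);   // acc + |a - 3|, unpredicated
+ *   svuint16_t h = svhadd_u16_z(pg, x, y);   // halving add, zeroing form
+ */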
+#if defined(__ARM_FEATURE_SVE2)
+#define svaba_n_s8(...) __builtin_sve_svaba_n_s8(__VA_ARGS__)
+#define svaba_n_s32(...) __builtin_sve_svaba_n_s32(__VA_ARGS__)
+#define svaba_n_s64(...) __builtin_sve_svaba_n_s64(__VA_ARGS__)
+#define svaba_n_s16(...) __builtin_sve_svaba_n_s16(__VA_ARGS__)
+#define svaba_n_u8(...) __builtin_sve_svaba_n_u8(__VA_ARGS__)
+#define svaba_n_u32(...) __builtin_sve_svaba_n_u32(__VA_ARGS__)
+#define svaba_n_u64(...) __builtin_sve_svaba_n_u64(__VA_ARGS__)
+#define svaba_n_u16(...) __builtin_sve_svaba_n_u16(__VA_ARGS__)
+#define svaba_s8(...) __builtin_sve_svaba_s8(__VA_ARGS__)
+#define svaba_s32(...) __builtin_sve_svaba_s32(__VA_ARGS__)
+#define svaba_s64(...) __builtin_sve_svaba_s64(__VA_ARGS__)
+#define svaba_s16(...) __builtin_sve_svaba_s16(__VA_ARGS__)
+#define svaba_u8(...) __builtin_sve_svaba_u8(__VA_ARGS__)
+#define svaba_u32(...) __builtin_sve_svaba_u32(__VA_ARGS__)
+#define svaba_u64(...) __builtin_sve_svaba_u64(__VA_ARGS__)
+#define svaba_u16(...) __builtin_sve_svaba_u16(__VA_ARGS__)
+#define svabalb_n_s32(...) __builtin_sve_svabalb_n_s32(__VA_ARGS__)
+#define svabalb_n_s64(...) __builtin_sve_svabalb_n_s64(__VA_ARGS__)
+#define svabalb_n_s16(...) __builtin_sve_svabalb_n_s16(__VA_ARGS__)
+#define svabalb_n_u32(...) __builtin_sve_svabalb_n_u32(__VA_ARGS__)
+#define svabalb_n_u64(...) __builtin_sve_svabalb_n_u64(__VA_ARGS__)
+#define svabalb_n_u16(...) __builtin_sve_svabalb_n_u16(__VA_ARGS__)
+#define svabalb_s32(...) __builtin_sve_svabalb_s32(__VA_ARGS__)
+#define svabalb_s64(...) __builtin_sve_svabalb_s64(__VA_ARGS__)
+#define svabalb_s16(...) __builtin_sve_svabalb_s16(__VA_ARGS__)
+#define svabalb_u32(...) __builtin_sve_svabalb_u32(__VA_ARGS__)
+#define svabalb_u64(...) __builtin_sve_svabalb_u64(__VA_ARGS__)
+#define svabalb_u16(...) __builtin_sve_svabalb_u16(__VA_ARGS__)
+#define svabalt_n_s32(...) __builtin_sve_svabalt_n_s32(__VA_ARGS__)
+#define svabalt_n_s64(...) __builtin_sve_svabalt_n_s64(__VA_ARGS__)
+#define svabalt_n_s16(...) __builtin_sve_svabalt_n_s16(__VA_ARGS__)
+#define svabalt_n_u32(...) __builtin_sve_svabalt_n_u32(__VA_ARGS__)
+#define svabalt_n_u64(...) __builtin_sve_svabalt_n_u64(__VA_ARGS__)
+#define svabalt_n_u16(...) __builtin_sve_svabalt_n_u16(__VA_ARGS__)
+#define svabalt_s32(...) __builtin_sve_svabalt_s32(__VA_ARGS__)
+#define svabalt_s64(...) __builtin_sve_svabalt_s64(__VA_ARGS__)
+#define svabalt_s16(...) __builtin_sve_svabalt_s16(__VA_ARGS__)
+#define svabalt_u32(...) __builtin_sve_svabalt_u32(__VA_ARGS__)
+#define svabalt_u64(...) __builtin_sve_svabalt_u64(__VA_ARGS__)
+#define svabalt_u16(...) __builtin_sve_svabalt_u16(__VA_ARGS__)
+#define svabdlb_n_s32(...) __builtin_sve_svabdlb_n_s32(__VA_ARGS__)
+#define svabdlb_n_s64(...) __builtin_sve_svabdlb_n_s64(__VA_ARGS__)
+#define svabdlb_n_s16(...) __builtin_sve_svabdlb_n_s16(__VA_ARGS__)
+#define svabdlb_n_u32(...) __builtin_sve_svabdlb_n_u32(__VA_ARGS__)
+#define svabdlb_n_u64(...) __builtin_sve_svabdlb_n_u64(__VA_ARGS__)
+#define svabdlb_n_u16(...) __builtin_sve_svabdlb_n_u16(__VA_ARGS__)
+#define svabdlb_s32(...) __builtin_sve_svabdlb_s32(__VA_ARGS__)
+#define svabdlb_s64(...) __builtin_sve_svabdlb_s64(__VA_ARGS__)
+#define svabdlb_s16(...) __builtin_sve_svabdlb_s16(__VA_ARGS__)
+#define svabdlb_u32(...) __builtin_sve_svabdlb_u32(__VA_ARGS__)
+#define svabdlb_u64(...) __builtin_sve_svabdlb_u64(__VA_ARGS__)
+#define svabdlb_u16(...) __builtin_sve_svabdlb_u16(__VA_ARGS__)
+#define svabdlt_n_s32(...) __builtin_sve_svabdlt_n_s32(__VA_ARGS__)
+#define svabdlt_n_s64(...) __builtin_sve_svabdlt_n_s64(__VA_ARGS__)
+#define svabdlt_n_s16(...) __builtin_sve_svabdlt_n_s16(__VA_ARGS__)
+#define svabdlt_n_u32(...) __builtin_sve_svabdlt_n_u32(__VA_ARGS__)
+#define svabdlt_n_u64(...) __builtin_sve_svabdlt_n_u64(__VA_ARGS__)
+#define svabdlt_n_u16(...) __builtin_sve_svabdlt_n_u16(__VA_ARGS__)
+#define svabdlt_s32(...) __builtin_sve_svabdlt_s32(__VA_ARGS__)
+#define svabdlt_s64(...) __builtin_sve_svabdlt_s64(__VA_ARGS__)
+#define svabdlt_s16(...) __builtin_sve_svabdlt_s16(__VA_ARGS__)
+#define svabdlt_u32(...) __builtin_sve_svabdlt_u32(__VA_ARGS__)
+#define svabdlt_u64(...) __builtin_sve_svabdlt_u64(__VA_ARGS__)
+#define svabdlt_u16(...) __builtin_sve_svabdlt_u16(__VA_ARGS__)
+#define svadalp_s32_m(...) __builtin_sve_svadalp_s32_m(__VA_ARGS__)
+#define svadalp_s64_m(...) __builtin_sve_svadalp_s64_m(__VA_ARGS__)
+#define svadalp_s16_m(...) __builtin_sve_svadalp_s16_m(__VA_ARGS__)
+#define svadalp_s32_x(...) __builtin_sve_svadalp_s32_x(__VA_ARGS__)
+#define svadalp_s64_x(...) __builtin_sve_svadalp_s64_x(__VA_ARGS__)
+#define svadalp_s16_x(...) __builtin_sve_svadalp_s16_x(__VA_ARGS__)
+#define svadalp_s32_z(...) __builtin_sve_svadalp_s32_z(__VA_ARGS__)
+#define svadalp_s64_z(...) __builtin_sve_svadalp_s64_z(__VA_ARGS__)
+#define svadalp_s16_z(...) __builtin_sve_svadalp_s16_z(__VA_ARGS__)
+#define svadalp_u32_m(...) __builtin_sve_svadalp_u32_m(__VA_ARGS__)
+#define svadalp_u64_m(...) __builtin_sve_svadalp_u64_m(__VA_ARGS__)
+#define svadalp_u16_m(...) __builtin_sve_svadalp_u16_m(__VA_ARGS__)
+#define svadalp_u32_x(...) __builtin_sve_svadalp_u32_x(__VA_ARGS__)
+#define svadalp_u64_x(...) __builtin_sve_svadalp_u64_x(__VA_ARGS__)
+#define svadalp_u16_x(...) __builtin_sve_svadalp_u16_x(__VA_ARGS__)
+#define svadalp_u32_z(...) __builtin_sve_svadalp_u32_z(__VA_ARGS__)
+#define svadalp_u64_z(...) __builtin_sve_svadalp_u64_z(__VA_ARGS__)
+#define svadalp_u16_z(...) __builtin_sve_svadalp_u16_z(__VA_ARGS__)
+#define svadclb_n_u32(...) __builtin_sve_svadclb_n_u32(__VA_ARGS__)
+#define svadclb_n_u64(...) __builtin_sve_svadclb_n_u64(__VA_ARGS__)
+#define svadclb_u32(...) __builtin_sve_svadclb_u32(__VA_ARGS__)
+#define svadclb_u64(...) __builtin_sve_svadclb_u64(__VA_ARGS__)
+#define svadclt_n_u32(...) __builtin_sve_svadclt_n_u32(__VA_ARGS__)
+#define svadclt_n_u64(...) __builtin_sve_svadclt_n_u64(__VA_ARGS__)
+#define svadclt_u32(...) __builtin_sve_svadclt_u32(__VA_ARGS__)
+#define svadclt_u64(...) __builtin_sve_svadclt_u64(__VA_ARGS__)
+#define svaddhnb_n_u32(...) __builtin_sve_svaddhnb_n_u32(__VA_ARGS__)
+#define svaddhnb_n_u64(...) __builtin_sve_svaddhnb_n_u64(__VA_ARGS__)
+#define svaddhnb_n_u16(...) __builtin_sve_svaddhnb_n_u16(__VA_ARGS__)
+#define svaddhnb_n_s32(...) __builtin_sve_svaddhnb_n_s32(__VA_ARGS__)
+#define svaddhnb_n_s64(...) __builtin_sve_svaddhnb_n_s64(__VA_ARGS__)
+#define svaddhnb_n_s16(...) __builtin_sve_svaddhnb_n_s16(__VA_ARGS__)
+#define svaddhnb_u32(...) __builtin_sve_svaddhnb_u32(__VA_ARGS__)
+#define svaddhnb_u64(...) __builtin_sve_svaddhnb_u64(__VA_ARGS__)
+#define svaddhnb_u16(...) __builtin_sve_svaddhnb_u16(__VA_ARGS__)
+#define svaddhnb_s32(...) __builtin_sve_svaddhnb_s32(__VA_ARGS__)
+#define svaddhnb_s64(...) __builtin_sve_svaddhnb_s64(__VA_ARGS__)
+#define svaddhnb_s16(...) __builtin_sve_svaddhnb_s16(__VA_ARGS__)
+#define svaddhnt_n_u32(...) __builtin_sve_svaddhnt_n_u32(__VA_ARGS__)
+#define svaddhnt_n_u64(...) __builtin_sve_svaddhnt_n_u64(__VA_ARGS__)
+#define svaddhnt_n_u16(...) __builtin_sve_svaddhnt_n_u16(__VA_ARGS__)
+#define svaddhnt_n_s32(...) __builtin_sve_svaddhnt_n_s32(__VA_ARGS__)
+#define svaddhnt_n_s64(...) __builtin_sve_svaddhnt_n_s64(__VA_ARGS__)
+#define svaddhnt_n_s16(...) __builtin_sve_svaddhnt_n_s16(__VA_ARGS__)
+#define svaddhnt_u32(...) __builtin_sve_svaddhnt_u32(__VA_ARGS__)
+#define svaddhnt_u64(...) __builtin_sve_svaddhnt_u64(__VA_ARGS__)
+#define svaddhnt_u16(...) __builtin_sve_svaddhnt_u16(__VA_ARGS__)
+#define svaddhnt_s32(...) __builtin_sve_svaddhnt_s32(__VA_ARGS__)
+#define svaddhnt_s64(...) __builtin_sve_svaddhnt_s64(__VA_ARGS__)
+#define svaddhnt_s16(...) __builtin_sve_svaddhnt_s16(__VA_ARGS__)
+#define svaddlb_n_s32(...) __builtin_sve_svaddlb_n_s32(__VA_ARGS__)
+#define svaddlb_n_s64(...) __builtin_sve_svaddlb_n_s64(__VA_ARGS__)
+#define svaddlb_n_s16(...) __builtin_sve_svaddlb_n_s16(__VA_ARGS__)
+#define svaddlb_n_u32(...) __builtin_sve_svaddlb_n_u32(__VA_ARGS__)
+#define svaddlb_n_u64(...) __builtin_sve_svaddlb_n_u64(__VA_ARGS__)
+#define svaddlb_n_u16(...) __builtin_sve_svaddlb_n_u16(__VA_ARGS__)
+#define svaddlb_s32(...) __builtin_sve_svaddlb_s32(__VA_ARGS__)
+#define svaddlb_s64(...) __builtin_sve_svaddlb_s64(__VA_ARGS__)
+#define svaddlb_s16(...) __builtin_sve_svaddlb_s16(__VA_ARGS__)
+#define svaddlb_u32(...) __builtin_sve_svaddlb_u32(__VA_ARGS__)
+#define svaddlb_u64(...) __builtin_sve_svaddlb_u64(__VA_ARGS__)
+#define svaddlb_u16(...) __builtin_sve_svaddlb_u16(__VA_ARGS__)
+#define svaddlbt_n_s32(...) __builtin_sve_svaddlbt_n_s32(__VA_ARGS__)
+#define svaddlbt_n_s64(...) __builtin_sve_svaddlbt_n_s64(__VA_ARGS__)
+#define svaddlbt_n_s16(...) __builtin_sve_svaddlbt_n_s16(__VA_ARGS__)
+#define svaddlbt_s32(...) __builtin_sve_svaddlbt_s32(__VA_ARGS__)
+#define svaddlbt_s64(...) __builtin_sve_svaddlbt_s64(__VA_ARGS__)
+#define svaddlbt_s16(...) __builtin_sve_svaddlbt_s16(__VA_ARGS__)
+#define svaddlt_n_s32(...) __builtin_sve_svaddlt_n_s32(__VA_ARGS__)
+#define svaddlt_n_s64(...) __builtin_sve_svaddlt_n_s64(__VA_ARGS__)
+#define svaddlt_n_s16(...) __builtin_sve_svaddlt_n_s16(__VA_ARGS__)
+#define svaddlt_n_u32(...) __builtin_sve_svaddlt_n_u32(__VA_ARGS__)
+#define svaddlt_n_u64(...) __builtin_sve_svaddlt_n_u64(__VA_ARGS__)
+#define svaddlt_n_u16(...) __builtin_sve_svaddlt_n_u16(__VA_ARGS__)
+#define svaddlt_s32(...) __builtin_sve_svaddlt_s32(__VA_ARGS__)
+#define svaddlt_s64(...) __builtin_sve_svaddlt_s64(__VA_ARGS__)
+#define svaddlt_s16(...) __builtin_sve_svaddlt_s16(__VA_ARGS__)
+#define svaddlt_u32(...) __builtin_sve_svaddlt_u32(__VA_ARGS__)
+#define svaddlt_u64(...) __builtin_sve_svaddlt_u64(__VA_ARGS__)
+#define svaddlt_u16(...) __builtin_sve_svaddlt_u16(__VA_ARGS__)
+#define svaddp_f64_m(...) __builtin_sve_svaddp_f64_m(__VA_ARGS__)
+#define svaddp_f32_m(...) __builtin_sve_svaddp_f32_m(__VA_ARGS__)
+#define svaddp_f16_m(...) __builtin_sve_svaddp_f16_m(__VA_ARGS__)
+#define svaddp_f64_x(...) __builtin_sve_svaddp_f64_x(__VA_ARGS__)
+#define svaddp_f32_x(...) __builtin_sve_svaddp_f32_x(__VA_ARGS__)
+#define svaddp_f16_x(...) __builtin_sve_svaddp_f16_x(__VA_ARGS__)
+#define svaddp_u8_m(...) __builtin_sve_svaddp_u8_m(__VA_ARGS__)
+#define svaddp_u32_m(...) __builtin_sve_svaddp_u32_m(__VA_ARGS__)
+#define svaddp_u64_m(...) __builtin_sve_svaddp_u64_m(__VA_ARGS__)
+#define svaddp_u16_m(...) __builtin_sve_svaddp_u16_m(__VA_ARGS__)
+#define svaddp_s8_m(...) __builtin_sve_svaddp_s8_m(__VA_ARGS__)
+#define svaddp_s32_m(...) __builtin_sve_svaddp_s32_m(__VA_ARGS__)
+#define svaddp_s64_m(...) __builtin_sve_svaddp_s64_m(__VA_ARGS__)
+#define svaddp_s16_m(...) __builtin_sve_svaddp_s16_m(__VA_ARGS__)
+#define svaddp_u8_x(...) __builtin_sve_svaddp_u8_x(__VA_ARGS__)
+#define svaddp_u32_x(...) __builtin_sve_svaddp_u32_x(__VA_ARGS__)
+#define svaddp_u64_x(...) __builtin_sve_svaddp_u64_x(__VA_ARGS__)
+#define svaddp_u16_x(...) __builtin_sve_svaddp_u16_x(__VA_ARGS__)
+#define svaddp_s8_x(...) __builtin_sve_svaddp_s8_x(__VA_ARGS__)
+#define svaddp_s32_x(...) __builtin_sve_svaddp_s32_x(__VA_ARGS__)
+#define svaddp_s64_x(...) __builtin_sve_svaddp_s64_x(__VA_ARGS__)
+#define svaddp_s16_x(...) __builtin_sve_svaddp_s16_x(__VA_ARGS__)
+#define svaddwb_n_s32(...) __builtin_sve_svaddwb_n_s32(__VA_ARGS__)
+#define svaddwb_n_s64(...) __builtin_sve_svaddwb_n_s64(__VA_ARGS__)
+#define svaddwb_n_s16(...) __builtin_sve_svaddwb_n_s16(__VA_ARGS__)
+#define svaddwb_n_u32(...) __builtin_sve_svaddwb_n_u32(__VA_ARGS__)
+#define svaddwb_n_u64(...) __builtin_sve_svaddwb_n_u64(__VA_ARGS__)
+#define svaddwb_n_u16(...) __builtin_sve_svaddwb_n_u16(__VA_ARGS__)
+#define svaddwb_s32(...) __builtin_sve_svaddwb_s32(__VA_ARGS__)
+#define svaddwb_s64(...) __builtin_sve_svaddwb_s64(__VA_ARGS__)
+#define svaddwb_s16(...) __builtin_sve_svaddwb_s16(__VA_ARGS__)
+#define svaddwb_u32(...) __builtin_sve_svaddwb_u32(__VA_ARGS__)
+#define svaddwb_u64(...) __builtin_sve_svaddwb_u64(__VA_ARGS__)
+#define svaddwb_u16(...) __builtin_sve_svaddwb_u16(__VA_ARGS__)
+#define svaddwt_n_s32(...) __builtin_sve_svaddwt_n_s32(__VA_ARGS__)
+#define svaddwt_n_s64(...) __builtin_sve_svaddwt_n_s64(__VA_ARGS__)
+#define svaddwt_n_s16(...) __builtin_sve_svaddwt_n_s16(__VA_ARGS__)
+#define svaddwt_n_u32(...) __builtin_sve_svaddwt_n_u32(__VA_ARGS__)
+#define svaddwt_n_u64(...) __builtin_sve_svaddwt_n_u64(__VA_ARGS__)
+#define svaddwt_n_u16(...) __builtin_sve_svaddwt_n_u16(__VA_ARGS__)
+#define svaddwt_s32(...) __builtin_sve_svaddwt_s32(__VA_ARGS__)
+#define svaddwt_s64(...) __builtin_sve_svaddwt_s64(__VA_ARGS__)
+#define svaddwt_s16(...) __builtin_sve_svaddwt_s16(__VA_ARGS__)
+#define svaddwt_u32(...) __builtin_sve_svaddwt_u32(__VA_ARGS__)
+#define svaddwt_u64(...) __builtin_sve_svaddwt_u64(__VA_ARGS__)
+#define svaddwt_u16(...) __builtin_sve_svaddwt_u16(__VA_ARGS__)
+#define svbcax_n_u8(...) __builtin_sve_svbcax_n_u8(__VA_ARGS__)
+#define svbcax_n_u32(...) __builtin_sve_svbcax_n_u32(__VA_ARGS__)
+#define svbcax_n_u64(...) __builtin_sve_svbcax_n_u64(__VA_ARGS__)
+#define svbcax_n_u16(...) __builtin_sve_svbcax_n_u16(__VA_ARGS__)
+#define svbcax_n_s8(...) __builtin_sve_svbcax_n_s8(__VA_ARGS__)
+#define svbcax_n_s32(...) __builtin_sve_svbcax_n_s32(__VA_ARGS__)
+#define svbcax_n_s64(...) __builtin_sve_svbcax_n_s64(__VA_ARGS__)
+#define svbcax_n_s16(...) __builtin_sve_svbcax_n_s16(__VA_ARGS__)
+#define svbcax_u8(...) __builtin_sve_svbcax_u8(__VA_ARGS__)
+#define svbcax_u32(...) __builtin_sve_svbcax_u32(__VA_ARGS__)
+#define svbcax_u64(...) __builtin_sve_svbcax_u64(__VA_ARGS__)
+#define svbcax_u16(...) __builtin_sve_svbcax_u16(__VA_ARGS__)
+#define svbcax_s8(...) __builtin_sve_svbcax_s8(__VA_ARGS__)
+#define svbcax_s32(...) __builtin_sve_svbcax_s32(__VA_ARGS__)
+#define svbcax_s64(...) __builtin_sve_svbcax_s64(__VA_ARGS__)
+#define svbcax_s16(...) __builtin_sve_svbcax_s16(__VA_ARGS__)
+#define svbsl1n_n_u8(...) __builtin_sve_svbsl1n_n_u8(__VA_ARGS__)
+#define svbsl1n_n_u32(...) __builtin_sve_svbsl1n_n_u32(__VA_ARGS__)
+#define svbsl1n_n_u64(...) __builtin_sve_svbsl1n_n_u64(__VA_ARGS__)
+#define svbsl1n_n_u16(...) __builtin_sve_svbsl1n_n_u16(__VA_ARGS__)
+#define svbsl1n_n_s8(...) __builtin_sve_svbsl1n_n_s8(__VA_ARGS__)
+#define svbsl1n_n_s32(...) __builtin_sve_svbsl1n_n_s32(__VA_ARGS__)
+#define svbsl1n_n_s64(...) __builtin_sve_svbsl1n_n_s64(__VA_ARGS__)
+#define svbsl1n_n_s16(...) __builtin_sve_svbsl1n_n_s16(__VA_ARGS__)
+#define svbsl1n_u8(...) __builtin_sve_svbsl1n_u8(__VA_ARGS__)
+#define svbsl1n_u32(...) __builtin_sve_svbsl1n_u32(__VA_ARGS__)
+#define svbsl1n_u64(...) __builtin_sve_svbsl1n_u64(__VA_ARGS__)
+#define svbsl1n_u16(...) __builtin_sve_svbsl1n_u16(__VA_ARGS__)
+#define svbsl1n_s8(...) __builtin_sve_svbsl1n_s8(__VA_ARGS__)
+#define svbsl1n_s32(...) __builtin_sve_svbsl1n_s32(__VA_ARGS__)
+#define svbsl1n_s64(...) __builtin_sve_svbsl1n_s64(__VA_ARGS__)
+#define svbsl1n_s16(...) __builtin_sve_svbsl1n_s16(__VA_ARGS__)
+#define svbsl2n_n_u8(...) __builtin_sve_svbsl2n_n_u8(__VA_ARGS__)
+#define svbsl2n_n_u32(...) __builtin_sve_svbsl2n_n_u32(__VA_ARGS__)
+#define svbsl2n_n_u64(...) __builtin_sve_svbsl2n_n_u64(__VA_ARGS__)
+#define svbsl2n_n_u16(...) __builtin_sve_svbsl2n_n_u16(__VA_ARGS__)
+#define svbsl2n_n_s8(...) __builtin_sve_svbsl2n_n_s8(__VA_ARGS__)
+#define svbsl2n_n_s32(...) __builtin_sve_svbsl2n_n_s32(__VA_ARGS__)
+#define svbsl2n_n_s64(...) __builtin_sve_svbsl2n_n_s64(__VA_ARGS__)
+#define svbsl2n_n_s16(...) __builtin_sve_svbsl2n_n_s16(__VA_ARGS__)
+#define svbsl2n_u8(...) __builtin_sve_svbsl2n_u8(__VA_ARGS__)
+#define svbsl2n_u32(...) __builtin_sve_svbsl2n_u32(__VA_ARGS__)
+#define svbsl2n_u64(...) __builtin_sve_svbsl2n_u64(__VA_ARGS__)
+#define svbsl2n_u16(...) __builtin_sve_svbsl2n_u16(__VA_ARGS__)
+#define svbsl2n_s8(...) __builtin_sve_svbsl2n_s8(__VA_ARGS__)
+#define svbsl2n_s32(...) __builtin_sve_svbsl2n_s32(__VA_ARGS__)
+#define svbsl2n_s64(...) __builtin_sve_svbsl2n_s64(__VA_ARGS__)
+#define svbsl2n_s16(...) __builtin_sve_svbsl2n_s16(__VA_ARGS__)
+#define svbsl_n_u8(...) __builtin_sve_svbsl_n_u8(__VA_ARGS__)
+#define svbsl_n_u32(...) __builtin_sve_svbsl_n_u32(__VA_ARGS__)
+#define svbsl_n_u64(...) __builtin_sve_svbsl_n_u64(__VA_ARGS__)
+#define svbsl_n_u16(...) __builtin_sve_svbsl_n_u16(__VA_ARGS__)
+#define svbsl_n_s8(...) __builtin_sve_svbsl_n_s8(__VA_ARGS__)
+#define svbsl_n_s32(...) __builtin_sve_svbsl_n_s32(__VA_ARGS__)
+#define svbsl_n_s64(...) __builtin_sve_svbsl_n_s64(__VA_ARGS__)
+#define svbsl_n_s16(...) __builtin_sve_svbsl_n_s16(__VA_ARGS__)
+#define svbsl_u8(...) __builtin_sve_svbsl_u8(__VA_ARGS__)
+#define svbsl_u32(...) __builtin_sve_svbsl_u32(__VA_ARGS__)
+#define svbsl_u64(...) __builtin_sve_svbsl_u64(__VA_ARGS__)
+#define svbsl_u16(...) __builtin_sve_svbsl_u16(__VA_ARGS__)
+#define svbsl_s8(...) __builtin_sve_svbsl_s8(__VA_ARGS__)
+#define svbsl_s32(...) __builtin_sve_svbsl_s32(__VA_ARGS__)
+#define svbsl_s64(...) __builtin_sve_svbsl_s64(__VA_ARGS__)
+#define svbsl_s16(...) __builtin_sve_svbsl_s16(__VA_ARGS__)
+#define svcadd_u8(...) __builtin_sve_svcadd_u8(__VA_ARGS__)
+#define svcadd_u32(...) __builtin_sve_svcadd_u32(__VA_ARGS__)
+#define svcadd_u64(...) __builtin_sve_svcadd_u64(__VA_ARGS__)
+#define svcadd_u16(...) __builtin_sve_svcadd_u16(__VA_ARGS__)
+#define svcadd_s8(...) __builtin_sve_svcadd_s8(__VA_ARGS__)
+#define svcadd_s32(...) __builtin_sve_svcadd_s32(__VA_ARGS__)
+#define svcadd_s64(...) __builtin_sve_svcadd_s64(__VA_ARGS__)
+#define svcadd_s16(...) __builtin_sve_svcadd_s16(__VA_ARGS__)
+#define svcdot_s32(...) __builtin_sve_svcdot_s32(__VA_ARGS__)
+#define svcdot_s64(...) __builtin_sve_svcdot_s64(__VA_ARGS__)
+#define svcdot_lane_s32(...) __builtin_sve_svcdot_lane_s32(__VA_ARGS__)
+#define svcdot_lane_s64(...) __builtin_sve_svcdot_lane_s64(__VA_ARGS__)
+#define svcmla_u8(...) __builtin_sve_svcmla_u8(__VA_ARGS__)
+#define svcmla_u32(...) __builtin_sve_svcmla_u32(__VA_ARGS__)
+#define svcmla_u64(...) __builtin_sve_svcmla_u64(__VA_ARGS__)
+#define svcmla_u16(...) __builtin_sve_svcmla_u16(__VA_ARGS__)
+#define svcmla_s8(...) __builtin_sve_svcmla_s8(__VA_ARGS__)
+#define svcmla_s32(...) __builtin_sve_svcmla_s32(__VA_ARGS__)
+#define svcmla_s64(...) __builtin_sve_svcmla_s64(__VA_ARGS__)
+#define svcmla_s16(...) __builtin_sve_svcmla_s16(__VA_ARGS__)
+#define svcmla_lane_u32(...) __builtin_sve_svcmla_lane_u32(__VA_ARGS__)
+#define svcmla_lane_u16(...) __builtin_sve_svcmla_lane_u16(__VA_ARGS__)
+#define svcmla_lane_s32(...) __builtin_sve_svcmla_lane_s32(__VA_ARGS__)
+#define svcmla_lane_s16(...) __builtin_sve_svcmla_lane_s16(__VA_ARGS__)
+#define svcvtlt_f32_f16_m(...) __builtin_sve_svcvtlt_f32_f16_m(__VA_ARGS__)
+#define svcvtlt_f32_f16_x(...) __builtin_sve_svcvtlt_f32_f16_x(__VA_ARGS__)
+#define svcvtlt_f64_f32_m(...) __builtin_sve_svcvtlt_f64_f32_m(__VA_ARGS__)
+#define svcvtlt_f64_f32_x(...) __builtin_sve_svcvtlt_f64_f32_x(__VA_ARGS__)
+#define svcvtnt_f16_f32_m(...) __builtin_sve_svcvtnt_f16_f32_m(__VA_ARGS__)
+#define svcvtnt_f32_f64_m(...) __builtin_sve_svcvtnt_f32_f64_m(__VA_ARGS__)
+#define svcvtx_f32_f64_m(...) __builtin_sve_svcvtx_f32_f64_m(__VA_ARGS__)
+#define svcvtx_f32_f64_x(...) __builtin_sve_svcvtx_f32_f64_x(__VA_ARGS__)
+#define svcvtx_f32_f64_z(...) __builtin_sve_svcvtx_f32_f64_z(__VA_ARGS__)
+#define svcvtxnt_f32_f64_m(...) __builtin_sve_svcvtxnt_f32_f64_m(__VA_ARGS__)
+#define sveor3_n_u8(...) __builtin_sve_sveor3_n_u8(__VA_ARGS__)
+#define sveor3_n_u32(...) __builtin_sve_sveor3_n_u32(__VA_ARGS__)
+#define sveor3_n_u64(...) __builtin_sve_sveor3_n_u64(__VA_ARGS__)
+#define sveor3_n_u16(...) __builtin_sve_sveor3_n_u16(__VA_ARGS__)
+#define sveor3_n_s8(...) __builtin_sve_sveor3_n_s8(__VA_ARGS__)
+#define sveor3_n_s32(...) __builtin_sve_sveor3_n_s32(__VA_ARGS__)
+#define sveor3_n_s64(...) __builtin_sve_sveor3_n_s64(__VA_ARGS__)
+#define sveor3_n_s16(...) __builtin_sve_sveor3_n_s16(__VA_ARGS__)
+#define sveor3_u8(...) __builtin_sve_sveor3_u8(__VA_ARGS__)
+#define sveor3_u32(...) __builtin_sve_sveor3_u32(__VA_ARGS__)
+#define sveor3_u64(...) __builtin_sve_sveor3_u64(__VA_ARGS__)
+#define sveor3_u16(...) __builtin_sve_sveor3_u16(__VA_ARGS__)
+#define sveor3_s8(...) __builtin_sve_sveor3_s8(__VA_ARGS__)
+#define sveor3_s32(...) __builtin_sve_sveor3_s32(__VA_ARGS__)
+#define sveor3_s64(...) __builtin_sve_sveor3_s64(__VA_ARGS__)
+#define sveor3_s16(...) __builtin_sve_sveor3_s16(__VA_ARGS__)
+#define sveorbt_n_u8(...) __builtin_sve_sveorbt_n_u8(__VA_ARGS__)
+#define sveorbt_n_u32(...) __builtin_sve_sveorbt_n_u32(__VA_ARGS__)
+#define sveorbt_n_u64(...) __builtin_sve_sveorbt_n_u64(__VA_ARGS__)
+#define sveorbt_n_u16(...) __builtin_sve_sveorbt_n_u16(__VA_ARGS__)
+#define sveorbt_n_s8(...) __builtin_sve_sveorbt_n_s8(__VA_ARGS__)
+#define sveorbt_n_s32(...) __builtin_sve_sveorbt_n_s32(__VA_ARGS__)
+#define sveorbt_n_s64(...) __builtin_sve_sveorbt_n_s64(__VA_ARGS__)
+#define sveorbt_n_s16(...) __builtin_sve_sveorbt_n_s16(__VA_ARGS__)
+#define sveorbt_u8(...) __builtin_sve_sveorbt_u8(__VA_ARGS__)
+#define sveorbt_u32(...) __builtin_sve_sveorbt_u32(__VA_ARGS__)
+#define sveorbt_u64(...) __builtin_sve_sveorbt_u64(__VA_ARGS__)
+#define sveorbt_u16(...) __builtin_sve_sveorbt_u16(__VA_ARGS__)
+#define sveorbt_s8(...) __builtin_sve_sveorbt_s8(__VA_ARGS__)
+#define sveorbt_s32(...) __builtin_sve_sveorbt_s32(__VA_ARGS__)
+#define sveorbt_s64(...) __builtin_sve_sveorbt_s64(__VA_ARGS__)
+#define sveorbt_s16(...) __builtin_sve_sveorbt_s16(__VA_ARGS__)
+#define sveortb_n_u8(...) __builtin_sve_sveortb_n_u8(__VA_ARGS__)
+#define sveortb_n_u32(...) __builtin_sve_sveortb_n_u32(__VA_ARGS__)
+#define sveortb_n_u64(...) __builtin_sve_sveortb_n_u64(__VA_ARGS__)
+#define sveortb_n_u16(...) __builtin_sve_sveortb_n_u16(__VA_ARGS__)
+#define sveortb_n_s8(...) __builtin_sve_sveortb_n_s8(__VA_ARGS__)
+#define sveortb_n_s32(...) __builtin_sve_sveortb_n_s32(__VA_ARGS__)
+#define sveortb_n_s64(...) __builtin_sve_sveortb_n_s64(__VA_ARGS__)
+#define sveortb_n_s16(...) __builtin_sve_sveortb_n_s16(__VA_ARGS__)
+#define sveortb_u8(...) __builtin_sve_sveortb_u8(__VA_ARGS__)
+#define sveortb_u32(...) __builtin_sve_sveortb_u32(__VA_ARGS__)
+#define sveortb_u64(...) __builtin_sve_sveortb_u64(__VA_ARGS__)
+#define sveortb_u16(...) __builtin_sve_sveortb_u16(__VA_ARGS__)
+#define sveortb_s8(...) __builtin_sve_sveortb_s8(__VA_ARGS__)
+#define sveortb_s32(...) __builtin_sve_sveortb_s32(__VA_ARGS__)
+#define sveortb_s64(...) __builtin_sve_sveortb_s64(__VA_ARGS__)
+#define sveortb_s16(...) __builtin_sve_sveortb_s16(__VA_ARGS__)
+#define svhadd_n_s8_m(...) __builtin_sve_svhadd_n_s8_m(__VA_ARGS__)
+#define svhadd_n_s32_m(...) __builtin_sve_svhadd_n_s32_m(__VA_ARGS__)
+#define svhadd_n_s64_m(...) __builtin_sve_svhadd_n_s64_m(__VA_ARGS__)
+#define svhadd_n_s16_m(...) __builtin_sve_svhadd_n_s16_m(__VA_ARGS__)
+#define svhadd_n_s8_x(...) __builtin_sve_svhadd_n_s8_x(__VA_ARGS__)
+#define svhadd_n_s32_x(...) __builtin_sve_svhadd_n_s32_x(__VA_ARGS__)
+#define svhadd_n_s64_x(...) __builtin_sve_svhadd_n_s64_x(__VA_ARGS__)
+#define svhadd_n_s16_x(...) __builtin_sve_svhadd_n_s16_x(__VA_ARGS__)
+#define svhadd_n_s8_z(...) __builtin_sve_svhadd_n_s8_z(__VA_ARGS__)
+#define svhadd_n_s32_z(...) __builtin_sve_svhadd_n_s32_z(__VA_ARGS__)
+#define svhadd_n_s64_z(...) __builtin_sve_svhadd_n_s64_z(__VA_ARGS__)
+#define svhadd_n_s16_z(...) __builtin_sve_svhadd_n_s16_z(__VA_ARGS__)
+#define svhadd_n_u8_m(...) __builtin_sve_svhadd_n_u8_m(__VA_ARGS__)
+#define svhadd_n_u32_m(...) __builtin_sve_svhadd_n_u32_m(__VA_ARGS__)
+#define svhadd_n_u64_m(...) __builtin_sve_svhadd_n_u64_m(__VA_ARGS__)
+#define svhadd_n_u16_m(...) __builtin_sve_svhadd_n_u16_m(__VA_ARGS__)
+#define svhadd_n_u8_x(...) __builtin_sve_svhadd_n_u8_x(__VA_ARGS__)
+#define svhadd_n_u32_x(...) __builtin_sve_svhadd_n_u32_x(__VA_ARGS__)
+#define svhadd_n_u64_x(...) __builtin_sve_svhadd_n_u64_x(__VA_ARGS__)
+#define svhadd_n_u16_x(...) __builtin_sve_svhadd_n_u16_x(__VA_ARGS__)
+#define svhadd_n_u8_z(...) __builtin_sve_svhadd_n_u8_z(__VA_ARGS__)
+#define svhadd_n_u32_z(...) __builtin_sve_svhadd_n_u32_z(__VA_ARGS__)
+#define svhadd_n_u64_z(...) __builtin_sve_svhadd_n_u64_z(__VA_ARGS__)
+#define svhadd_n_u16_z(...) __builtin_sve_svhadd_n_u16_z(__VA_ARGS__)
+#define svhadd_s8_m(...) __builtin_sve_svhadd_s8_m(__VA_ARGS__)
+#define svhadd_s32_m(...) __builtin_sve_svhadd_s32_m(__VA_ARGS__)
+#define svhadd_s64_m(...) __builtin_sve_svhadd_s64_m(__VA_ARGS__)
+#define svhadd_s16_m(...) __builtin_sve_svhadd_s16_m(__VA_ARGS__)
+#define svhadd_s8_x(...) __builtin_sve_svhadd_s8_x(__VA_ARGS__)
+#define svhadd_s32_x(...) __builtin_sve_svhadd_s32_x(__VA_ARGS__)
+#define svhadd_s64_x(...) __builtin_sve_svhadd_s64_x(__VA_ARGS__)
+#define svhadd_s16_x(...) __builtin_sve_svhadd_s16_x(__VA_ARGS__)
+#define svhadd_s8_z(...) __builtin_sve_svhadd_s8_z(__VA_ARGS__)
+#define svhadd_s32_z(...) __builtin_sve_svhadd_s32_z(__VA_ARGS__)
+#define svhadd_s64_z(...) __builtin_sve_svhadd_s64_z(__VA_ARGS__)
+#define svhadd_s16_z(...) __builtin_sve_svhadd_s16_z(__VA_ARGS__)
+#define svhadd_u8_m(...) __builtin_sve_svhadd_u8_m(__VA_ARGS__)
+#define svhadd_u32_m(...) __builtin_sve_svhadd_u32_m(__VA_ARGS__)
+#define svhadd_u64_m(...) __builtin_sve_svhadd_u64_m(__VA_ARGS__)
+#define svhadd_u16_m(...) __builtin_sve_svhadd_u16_m(__VA_ARGS__)
+#define svhadd_u8_x(...) __builtin_sve_svhadd_u8_x(__VA_ARGS__)
+#define svhadd_u32_x(...) __builtin_sve_svhadd_u32_x(__VA_ARGS__)
+#define svhadd_u64_x(...) __builtin_sve_svhadd_u64_x(__VA_ARGS__)
+#define svhadd_u16_x(...) __builtin_sve_svhadd_u16_x(__VA_ARGS__)
+#define svhadd_u8_z(...) __builtin_sve_svhadd_u8_z(__VA_ARGS__)
+#define svhadd_u32_z(...) __builtin_sve_svhadd_u32_z(__VA_ARGS__)
+#define svhadd_u64_z(...) __builtin_sve_svhadd_u64_z(__VA_ARGS__)
+#define svhadd_u16_z(...) __builtin_sve_svhadd_u16_z(__VA_ARGS__)
+#define svhistcnt_u32_z(...) __builtin_sve_svhistcnt_u32_z(__VA_ARGS__)
+#define svhistcnt_u64_z(...) __builtin_sve_svhistcnt_u64_z(__VA_ARGS__)
+#define svhistcnt_s32_z(...) __builtin_sve_svhistcnt_s32_z(__VA_ARGS__)
+#define svhistcnt_s64_z(...) __builtin_sve_svhistcnt_s64_z(__VA_ARGS__)
+#define svhistseg_u8(...) __builtin_sve_svhistseg_u8(__VA_ARGS__)
+#define svhistseg_s8(...) __builtin_sve_svhistseg_s8(__VA_ARGS__)
+#define svhsub_n_s8_m(...) __builtin_sve_svhsub_n_s8_m(__VA_ARGS__)
+#define svhsub_n_s32_m(...) __builtin_sve_svhsub_n_s32_m(__VA_ARGS__)
+#define svhsub_n_s64_m(...) __builtin_sve_svhsub_n_s64_m(__VA_ARGS__)
+#define svhsub_n_s16_m(...) __builtin_sve_svhsub_n_s16_m(__VA_ARGS__)
+#define svhsub_n_s8_x(...) __builtin_sve_svhsub_n_s8_x(__VA_ARGS__)
+#define svhsub_n_s32_x(...) __builtin_sve_svhsub_n_s32_x(__VA_ARGS__)
+#define svhsub_n_s64_x(...) __builtin_sve_svhsub_n_s64_x(__VA_ARGS__)
+#define svhsub_n_s16_x(...) __builtin_sve_svhsub_n_s16_x(__VA_ARGS__)
+#define svhsub_n_s8_z(...) __builtin_sve_svhsub_n_s8_z(__VA_ARGS__)
+#define svhsub_n_s32_z(...) __builtin_sve_svhsub_n_s32_z(__VA_ARGS__)
+#define svhsub_n_s64_z(...) __builtin_sve_svhsub_n_s64_z(__VA_ARGS__)
+#define svhsub_n_s16_z(...) __builtin_sve_svhsub_n_s16_z(__VA_ARGS__)
+#define svhsub_n_u8_m(...) __builtin_sve_svhsub_n_u8_m(__VA_ARGS__)
+#define svhsub_n_u32_m(...) __builtin_sve_svhsub_n_u32_m(__VA_ARGS__)
+#define svhsub_n_u64_m(...) __builtin_sve_svhsub_n_u64_m(__VA_ARGS__)
+#define svhsub_n_u16_m(...) __builtin_sve_svhsub_n_u16_m(__VA_ARGS__)
+#define svhsub_n_u8_x(...) __builtin_sve_svhsub_n_u8_x(__VA_ARGS__)
+#define svhsub_n_u32_x(...) __builtin_sve_svhsub_n_u32_x(__VA_ARGS__)
+#define svhsub_n_u64_x(...) __builtin_sve_svhsub_n_u64_x(__VA_ARGS__)
+#define svhsub_n_u16_x(...) __builtin_sve_svhsub_n_u16_x(__VA_ARGS__)
+#define svhsub_n_u8_z(...) __builtin_sve_svhsub_n_u8_z(__VA_ARGS__)
+#define svhsub_n_u32_z(...) __builtin_sve_svhsub_n_u32_z(__VA_ARGS__)
+#define svhsub_n_u64_z(...) __builtin_sve_svhsub_n_u64_z(__VA_ARGS__)
+#define svhsub_n_u16_z(...) __builtin_sve_svhsub_n_u16_z(__VA_ARGS__)
+#define svhsub_s8_m(...) __builtin_sve_svhsub_s8_m(__VA_ARGS__)
+#define svhsub_s32_m(...) __builtin_sve_svhsub_s32_m(__VA_ARGS__)
+#define svhsub_s64_m(...) __builtin_sve_svhsub_s64_m(__VA_ARGS__)
+#define svhsub_s16_m(...) __builtin_sve_svhsub_s16_m(__VA_ARGS__)
+#define svhsub_s8_x(...) __builtin_sve_svhsub_s8_x(__VA_ARGS__)
+#define svhsub_s32_x(...) __builtin_sve_svhsub_s32_x(__VA_ARGS__)
+#define svhsub_s64_x(...) __builtin_sve_svhsub_s64_x(__VA_ARGS__)
+#define svhsub_s16_x(...) __builtin_sve_svhsub_s16_x(__VA_ARGS__)
+#define svhsub_s8_z(...) __builtin_sve_svhsub_s8_z(__VA_ARGS__)
+#define svhsub_s32_z(...) __builtin_sve_svhsub_s32_z(__VA_ARGS__)
+#define svhsub_s64_z(...) __builtin_sve_svhsub_s64_z(__VA_ARGS__)
+#define svhsub_s16_z(...) __builtin_sve_svhsub_s16_z(__VA_ARGS__)
+#define svhsub_u8_m(...) __builtin_sve_svhsub_u8_m(__VA_ARGS__)
+#define svhsub_u32_m(...) __builtin_sve_svhsub_u32_m(__VA_ARGS__)
+#define svhsub_u64_m(...) __builtin_sve_svhsub_u64_m(__VA_ARGS__)
+#define svhsub_u16_m(...) __builtin_sve_svhsub_u16_m(__VA_ARGS__)
+#define svhsub_u8_x(...) __builtin_sve_svhsub_u8_x(__VA_ARGS__)
+#define svhsub_u32_x(...) __builtin_sve_svhsub_u32_x(__VA_ARGS__)
+#define svhsub_u64_x(...) __builtin_sve_svhsub_u64_x(__VA_ARGS__)
+#define svhsub_u16_x(...) __builtin_sve_svhsub_u16_x(__VA_ARGS__)
+#define svhsub_u8_z(...) __builtin_sve_svhsub_u8_z(__VA_ARGS__)
+#define svhsub_u32_z(...) __builtin_sve_svhsub_u32_z(__VA_ARGS__)
+#define svhsub_u64_z(...) __builtin_sve_svhsub_u64_z(__VA_ARGS__)
+#define svhsub_u16_z(...) __builtin_sve_svhsub_u16_z(__VA_ARGS__)
+#define svhsubr_n_s8_m(...) __builtin_sve_svhsubr_n_s8_m(__VA_ARGS__)
+#define svhsubr_n_s32_m(...) __builtin_sve_svhsubr_n_s32_m(__VA_ARGS__)
+#define svhsubr_n_s64_m(...) __builtin_sve_svhsubr_n_s64_m(__VA_ARGS__)
+#define svhsubr_n_s16_m(...) __builtin_sve_svhsubr_n_s16_m(__VA_ARGS__)
+#define svhsubr_n_s8_x(...) __builtin_sve_svhsubr_n_s8_x(__VA_ARGS__)
+#define svhsubr_n_s32_x(...) __builtin_sve_svhsubr_n_s32_x(__VA_ARGS__)
+#define svhsubr_n_s64_x(...) __builtin_sve_svhsubr_n_s64_x(__VA_ARGS__)
+#define svhsubr_n_s16_x(...) __builtin_sve_svhsubr_n_s16_x(__VA_ARGS__)
+#define svhsubr_n_s8_z(...) __builtin_sve_svhsubr_n_s8_z(__VA_ARGS__)
+#define svhsubr_n_s32_z(...) __builtin_sve_svhsubr_n_s32_z(__VA_ARGS__)
+#define svhsubr_n_s64_z(...) __builtin_sve_svhsubr_n_s64_z(__VA_ARGS__)
+#define svhsubr_n_s16_z(...) __builtin_sve_svhsubr_n_s16_z(__VA_ARGS__)
+#define svhsubr_n_u8_m(...) __builtin_sve_svhsubr_n_u8_m(__VA_ARGS__)
+#define svhsubr_n_u32_m(...) __builtin_sve_svhsubr_n_u32_m(__VA_ARGS__)
+#define svhsubr_n_u64_m(...) __builtin_sve_svhsubr_n_u64_m(__VA_ARGS__)
+#define svhsubr_n_u16_m(...) __builtin_sve_svhsubr_n_u16_m(__VA_ARGS__)
+#define svhsubr_n_u8_x(...) __builtin_sve_svhsubr_n_u8_x(__VA_ARGS__)
+#define svhsubr_n_u32_x(...) __builtin_sve_svhsubr_n_u32_x(__VA_ARGS__)
+#define svhsubr_n_u64_x(...) __builtin_sve_svhsubr_n_u64_x(__VA_ARGS__)
+#define svhsubr_n_u16_x(...) __builtin_sve_svhsubr_n_u16_x(__VA_ARGS__)
+#define svhsubr_n_u8_z(...) __builtin_sve_svhsubr_n_u8_z(__VA_ARGS__)
+#define svhsubr_n_u32_z(...) __builtin_sve_svhsubr_n_u32_z(__VA_ARGS__)
+#define svhsubr_n_u64_z(...) __builtin_sve_svhsubr_n_u64_z(__VA_ARGS__)
+#define svhsubr_n_u16_z(...) __builtin_sve_svhsubr_n_u16_z(__VA_ARGS__)
+#define svhsubr_s8_m(...) __builtin_sve_svhsubr_s8_m(__VA_ARGS__)
+#define svhsubr_s32_m(...) __builtin_sve_svhsubr_s32_m(__VA_ARGS__)
+#define svhsubr_s64_m(...) __builtin_sve_svhsubr_s64_m(__VA_ARGS__)
+#define svhsubr_s16_m(...) __builtin_sve_svhsubr_s16_m(__VA_ARGS__)
+#define svhsubr_s8_x(...) __builtin_sve_svhsubr_s8_x(__VA_ARGS__)
+#define svhsubr_s32_x(...) __builtin_sve_svhsubr_s32_x(__VA_ARGS__)
+#define svhsubr_s64_x(...) __builtin_sve_svhsubr_s64_x(__VA_ARGS__)
+#define svhsubr_s16_x(...) __builtin_sve_svhsubr_s16_x(__VA_ARGS__)
+#define svhsubr_s8_z(...) __builtin_sve_svhsubr_s8_z(__VA_ARGS__)
+#define svhsubr_s32_z(...) __builtin_sve_svhsubr_s32_z(__VA_ARGS__)
+#define svhsubr_s64_z(...) __builtin_sve_svhsubr_s64_z(__VA_ARGS__)
+#define svhsubr_s16_z(...) __builtin_sve_svhsubr_s16_z(__VA_ARGS__)
+#define svhsubr_u8_m(...) __builtin_sve_svhsubr_u8_m(__VA_ARGS__)
+#define svhsubr_u32_m(...) __builtin_sve_svhsubr_u32_m(__VA_ARGS__)
+#define svhsubr_u64_m(...) __builtin_sve_svhsubr_u64_m(__VA_ARGS__)
+#define svhsubr_u16_m(...) __builtin_sve_svhsubr_u16_m(__VA_ARGS__)
+#define svhsubr_u8_x(...) __builtin_sve_svhsubr_u8_x(__VA_ARGS__)
+#define svhsubr_u32_x(...) __builtin_sve_svhsubr_u32_x(__VA_ARGS__)
+#define svhsubr_u64_x(...) __builtin_sve_svhsubr_u64_x(__VA_ARGS__)
+#define svhsubr_u16_x(...) __builtin_sve_svhsubr_u16_x(__VA_ARGS__)
+#define svhsubr_u8_z(...) __builtin_sve_svhsubr_u8_z(__VA_ARGS__)
+#define svhsubr_u32_z(...) __builtin_sve_svhsubr_u32_z(__VA_ARGS__)
+#define svhsubr_u64_z(...) __builtin_sve_svhsubr_u64_z(__VA_ARGS__)
+#define svhsubr_u16_z(...) __builtin_sve_svhsubr_u16_z(__VA_ARGS__)
+#define svldnt1_gather_u32base_index_u32(...) __builtin_sve_svldnt1_gather_u32base_index_u32(__VA_ARGS__)
+#define svldnt1_gather_u64base_index_u64(...) __builtin_sve_svldnt1_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1_gather_u64base_index_f64(...) __builtin_sve_svldnt1_gather_u64base_index_f64(__VA_ARGS__)
+#define svldnt1_gather_u32base_index_f32(...) __builtin_sve_svldnt1_gather_u32base_index_f32(__VA_ARGS__)
+#define svldnt1_gather_u32base_index_s32(...) __builtin_sve_svldnt1_gather_u32base_index_s32(__VA_ARGS__)
+#define svldnt1_gather_u64base_index_s64(...) __builtin_sve_svldnt1_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1_gather_u32base_offset_u32(...) __builtin_sve_svldnt1_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1_gather_u64base_offset_u64(...) __builtin_sve_svldnt1_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1_gather_u64base_offset_f64(...) __builtin_sve_svldnt1_gather_u64base_offset_f64(__VA_ARGS__)
+#define svldnt1_gather_u32base_offset_f32(...) __builtin_sve_svldnt1_gather_u32base_offset_f32(__VA_ARGS__)
+#define svldnt1_gather_u32base_offset_s32(...) __builtin_sve_svldnt1_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1_gather_u64base_offset_s64(...) __builtin_sve_svldnt1_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1_gather_u32base_u32(...) __builtin_sve_svldnt1_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1_gather_u64base_u64(...) __builtin_sve_svldnt1_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1_gather_u64base_f64(...) __builtin_sve_svldnt1_gather_u64base_f64(__VA_ARGS__)
+#define svldnt1_gather_u32base_f32(...) __builtin_sve_svldnt1_gather_u32base_f32(__VA_ARGS__)
+#define svldnt1_gather_u32base_s32(...) __builtin_sve_svldnt1_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1_gather_u64base_s64(...) __builtin_sve_svldnt1_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1_gather_s64index_u64(...) __builtin_sve_svldnt1_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1_gather_s64index_f64(...) __builtin_sve_svldnt1_gather_s64index_f64(__VA_ARGS__)
+#define svldnt1_gather_s64index_s64(...) __builtin_sve_svldnt1_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1_gather_u64index_u64(...) __builtin_sve_svldnt1_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1_gather_u64index_f64(...) __builtin_sve_svldnt1_gather_u64index_f64(__VA_ARGS__)
+#define svldnt1_gather_u64index_s64(...) __builtin_sve_svldnt1_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1_gather_u32offset_u32(...) __builtin_sve_svldnt1_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1_gather_u32offset_f32(...) __builtin_sve_svldnt1_gather_u32offset_f32(__VA_ARGS__)
+#define svldnt1_gather_u32offset_s32(...) __builtin_sve_svldnt1_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1_gather_s64offset_u64(...) __builtin_sve_svldnt1_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1_gather_s64offset_f64(...) __builtin_sve_svldnt1_gather_s64offset_f64(__VA_ARGS__)
+#define svldnt1_gather_s64offset_s64(...) __builtin_sve_svldnt1_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1_gather_u64offset_u64(...) __builtin_sve_svldnt1_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1_gather_u64offset_f64(...) __builtin_sve_svldnt1_gather_u64offset_f64(__VA_ARGS__)
+#define svldnt1_gather_u64offset_s64(...) __builtin_sve_svldnt1_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_offset_u32(...) __builtin_sve_svldnt1sb_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_offset_u64(...) __builtin_sve_svldnt1sb_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_offset_s32(...) __builtin_sve_svldnt1sb_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_offset_s64(...) __builtin_sve_svldnt1sb_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_u32(...) __builtin_sve_svldnt1sb_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_u64(...) __builtin_sve_svldnt1sb_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1sb_gather_u32base_s32(...) __builtin_sve_svldnt1sb_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1sb_gather_u64base_s64(...) __builtin_sve_svldnt1sb_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u32offset_u32(...) __builtin_sve_svldnt1sb_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1sb_gather_u32offset_s32(...) __builtin_sve_svldnt1sb_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1sb_gather_s64offset_u64(...) __builtin_sve_svldnt1sb_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1sb_gather_s64offset_s64(...) __builtin_sve_svldnt1sb_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1sb_gather_u64offset_u64(...) __builtin_sve_svldnt1sb_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1sb_gather_u64offset_s64(...) __builtin_sve_svldnt1sb_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_index_u32(...) __builtin_sve_svldnt1sh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_index_u64(...) __builtin_sve_svldnt1sh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_index_s32(...) __builtin_sve_svldnt1sh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_index_s64(...) __builtin_sve_svldnt1sh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_offset_u32(...) __builtin_sve_svldnt1sh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_offset_u64(...) __builtin_sve_svldnt1sh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_offset_s32(...) __builtin_sve_svldnt1sh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_offset_s64(...) __builtin_sve_svldnt1sh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_u32(...) __builtin_sve_svldnt1sh_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_u64(...) __builtin_sve_svldnt1sh_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u32base_s32(...) __builtin_sve_svldnt1sh_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1sh_gather_u64base_s64(...) __builtin_sve_svldnt1sh_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1sh_gather_s64index_u64(...) __builtin_sve_svldnt1sh_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1sh_gather_s64index_s64(...) __builtin_sve_svldnt1sh_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u64index_u64(...) __builtin_sve_svldnt1sh_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u64index_s64(...) __builtin_sve_svldnt1sh_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u32offset_u32(...) __builtin_sve_svldnt1sh_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1sh_gather_u32offset_s32(...) __builtin_sve_svldnt1sh_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1sh_gather_s64offset_u64(...) __builtin_sve_svldnt1sh_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1sh_gather_s64offset_s64(...) __builtin_sve_svldnt1sh_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1sh_gather_u64offset_u64(...) __builtin_sve_svldnt1sh_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1sh_gather_u64offset_s64(...) __builtin_sve_svldnt1sh_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_index_u64(...) __builtin_sve_svldnt1sw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_index_s64(...) __builtin_sve_svldnt1sw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_offset_u64(...) __builtin_sve_svldnt1sw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_offset_s64(...) __builtin_sve_svldnt1sw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_u64(...) __builtin_sve_svldnt1sw_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64base_s64(...) __builtin_sve_svldnt1sw_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1sw_gather_s64index_u64(...) __builtin_sve_svldnt1sw_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1sw_gather_s64index_s64(...) __builtin_sve_svldnt1sw_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64index_u64(...) __builtin_sve_svldnt1sw_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64index_s64(...) __builtin_sve_svldnt1sw_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1sw_gather_s64offset_u64(...) __builtin_sve_svldnt1sw_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1sw_gather_s64offset_s64(...) __builtin_sve_svldnt1sw_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1sw_gather_u64offset_u64(...) __builtin_sve_svldnt1sw_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1sw_gather_u64offset_s64(...) __builtin_sve_svldnt1sw_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_offset_u32(...) __builtin_sve_svldnt1ub_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_offset_u64(...) __builtin_sve_svldnt1ub_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_offset_s32(...) __builtin_sve_svldnt1ub_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_offset_s64(...) __builtin_sve_svldnt1ub_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_u32(...) __builtin_sve_svldnt1ub_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_u64(...) __builtin_sve_svldnt1ub_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1ub_gather_u32base_s32(...) __builtin_sve_svldnt1ub_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1ub_gather_u64base_s64(...) __builtin_sve_svldnt1ub_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u32offset_u32(...) __builtin_sve_svldnt1ub_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1ub_gather_u32offset_s32(...) __builtin_sve_svldnt1ub_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1ub_gather_s64offset_u64(...) __builtin_sve_svldnt1ub_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1ub_gather_s64offset_s64(...) __builtin_sve_svldnt1ub_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1ub_gather_u64offset_u64(...) __builtin_sve_svldnt1ub_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1ub_gather_u64offset_s64(...) __builtin_sve_svldnt1ub_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_index_u32(...) __builtin_sve_svldnt1uh_gather_u32base_index_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_index_u64(...) __builtin_sve_svldnt1uh_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_index_s32(...) __builtin_sve_svldnt1uh_gather_u32base_index_s32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_index_s64(...) __builtin_sve_svldnt1uh_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_offset_u32(...) __builtin_sve_svldnt1uh_gather_u32base_offset_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_offset_u64(...) __builtin_sve_svldnt1uh_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_offset_s32(...) __builtin_sve_svldnt1uh_gather_u32base_offset_s32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_offset_s64(...) __builtin_sve_svldnt1uh_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_u32(...) __builtin_sve_svldnt1uh_gather_u32base_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_u64(...) __builtin_sve_svldnt1uh_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u32base_s32(...) __builtin_sve_svldnt1uh_gather_u32base_s32(__VA_ARGS__)
+#define svldnt1uh_gather_u64base_s64(...) __builtin_sve_svldnt1uh_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1uh_gather_s64index_u64(...) __builtin_sve_svldnt1uh_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1uh_gather_s64index_s64(...) __builtin_sve_svldnt1uh_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u64index_u64(...) __builtin_sve_svldnt1uh_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u64index_s64(...) __builtin_sve_svldnt1uh_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u32offset_u32(...) __builtin_sve_svldnt1uh_gather_u32offset_u32(__VA_ARGS__)
+#define svldnt1uh_gather_u32offset_s32(...) __builtin_sve_svldnt1uh_gather_u32offset_s32(__VA_ARGS__)
+#define svldnt1uh_gather_s64offset_u64(...) __builtin_sve_svldnt1uh_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1uh_gather_s64offset_s64(...) __builtin_sve_svldnt1uh_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1uh_gather_u64offset_u64(...) __builtin_sve_svldnt1uh_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1uh_gather_u64offset_s64(...) __builtin_sve_svldnt1uh_gather_u64offset_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_index_u64(...) __builtin_sve_svldnt1uw_gather_u64base_index_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_index_s64(...) __builtin_sve_svldnt1uw_gather_u64base_index_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_offset_u64(...) __builtin_sve_svldnt1uw_gather_u64base_offset_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_offset_s64(...) __builtin_sve_svldnt1uw_gather_u64base_offset_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_u64(...) __builtin_sve_svldnt1uw_gather_u64base_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64base_s64(...) __builtin_sve_svldnt1uw_gather_u64base_s64(__VA_ARGS__)
+#define svldnt1uw_gather_s64index_u64(...) __builtin_sve_svldnt1uw_gather_s64index_u64(__VA_ARGS__)
+#define svldnt1uw_gather_s64index_s64(...) __builtin_sve_svldnt1uw_gather_s64index_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64index_u64(...) __builtin_sve_svldnt1uw_gather_u64index_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64index_s64(...) __builtin_sve_svldnt1uw_gather_u64index_s64(__VA_ARGS__)
+#define svldnt1uw_gather_s64offset_u64(...) __builtin_sve_svldnt1uw_gather_s64offset_u64(__VA_ARGS__)
+#define svldnt1uw_gather_s64offset_s64(...) __builtin_sve_svldnt1uw_gather_s64offset_s64(__VA_ARGS__)
+#define svldnt1uw_gather_u64offset_u64(...) __builtin_sve_svldnt1uw_gather_u64offset_u64(__VA_ARGS__)
+#define svldnt1uw_gather_u64offset_s64(...) __builtin_sve_svldnt1uw_gather_u64offset_s64(__VA_ARGS__)
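+/*
+ * Gather naming, sketched from the ACLE scheme: the middle token gives the
+ * address form -- u32base/u64base take a vector of base addresses plus an
+ * optional scalar "offset" (bytes) or "index" (elements), while the
+ * s64offset/u64index etc. forms take a scalar base plus a vector of byte
+ * offsets or element-scaled indices.  The sb/sh/sw loads sign-extend a
+ * narrower memory element to the result width; ub/uh/uw zero-extend.
+ * Illustrative use:
+ *
+ *   // non-temporal gather of base[idx[i]] doubles, active lanes only
+ *   svfloat64_t v = svldnt1_gather_s64index_f64(pg, base, idx);
+ */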
+#define svlogb_f64_m(...) __builtin_sve_svlogb_f64_m(__VA_ARGS__)
+#define svlogb_f32_m(...) __builtin_sve_svlogb_f32_m(__VA_ARGS__)
+#define svlogb_f16_m(...) __builtin_sve_svlogb_f16_m(__VA_ARGS__)
+#define svlogb_f64_x(...) __builtin_sve_svlogb_f64_x(__VA_ARGS__)
+#define svlogb_f32_x(...) __builtin_sve_svlogb_f32_x(__VA_ARGS__)
+#define svlogb_f16_x(...) __builtin_sve_svlogb_f16_x(__VA_ARGS__)
+#define svlogb_f64_z(...) __builtin_sve_svlogb_f64_z(__VA_ARGS__)
+#define svlogb_f32_z(...) __builtin_sve_svlogb_f32_z(__VA_ARGS__)
+#define svlogb_f16_z(...) __builtin_sve_svlogb_f16_z(__VA_ARGS__)
+#define svmatch_u8(...) __builtin_sve_svmatch_u8(__VA_ARGS__)
+#define svmatch_u16(...) __builtin_sve_svmatch_u16(__VA_ARGS__)
+#define svmatch_s8(...) __builtin_sve_svmatch_s8(__VA_ARGS__)
+#define svmatch_s16(...) __builtin_sve_svmatch_s16(__VA_ARGS__)
+#define svmaxnmp_f64_m(...) __builtin_sve_svmaxnmp_f64_m(__VA_ARGS__)
+#define svmaxnmp_f32_m(...) __builtin_sve_svmaxnmp_f32_m(__VA_ARGS__)
+#define svmaxnmp_f16_m(...) __builtin_sve_svmaxnmp_f16_m(__VA_ARGS__)
+#define svmaxnmp_f64_x(...) __builtin_sve_svmaxnmp_f64_x(__VA_ARGS__)
+#define svmaxnmp_f32_x(...) __builtin_sve_svmaxnmp_f32_x(__VA_ARGS__)
+#define svmaxnmp_f16_x(...) __builtin_sve_svmaxnmp_f16_x(__VA_ARGS__)
+#define svmaxp_f64_m(...) __builtin_sve_svmaxp_f64_m(__VA_ARGS__)
+#define svmaxp_f32_m(...) __builtin_sve_svmaxp_f32_m(__VA_ARGS__)
+#define svmaxp_f16_m(...) __builtin_sve_svmaxp_f16_m(__VA_ARGS__)
+#define svmaxp_f64_x(...) __builtin_sve_svmaxp_f64_x(__VA_ARGS__)
+#define svmaxp_f32_x(...) __builtin_sve_svmaxp_f32_x(__VA_ARGS__)
+#define svmaxp_f16_x(...) __builtin_sve_svmaxp_f16_x(__VA_ARGS__)
+#define svmaxp_s8_m(...) __builtin_sve_svmaxp_s8_m(__VA_ARGS__)
+#define svmaxp_s32_m(...) __builtin_sve_svmaxp_s32_m(__VA_ARGS__)
+#define svmaxp_s64_m(...) __builtin_sve_svmaxp_s64_m(__VA_ARGS__)
+#define svmaxp_s16_m(...) __builtin_sve_svmaxp_s16_m(__VA_ARGS__)
+#define svmaxp_s8_x(...) __builtin_sve_svmaxp_s8_x(__VA_ARGS__)
+#define svmaxp_s32_x(...) __builtin_sve_svmaxp_s32_x(__VA_ARGS__)
+#define svmaxp_s64_x(...) __builtin_sve_svmaxp_s64_x(__VA_ARGS__)
+#define svmaxp_s16_x(...) __builtin_sve_svmaxp_s16_x(__VA_ARGS__)
+#define svmaxp_u8_m(...) __builtin_sve_svmaxp_u8_m(__VA_ARGS__)
+#define svmaxp_u32_m(...) __builtin_sve_svmaxp_u32_m(__VA_ARGS__)
+#define svmaxp_u64_m(...) __builtin_sve_svmaxp_u64_m(__VA_ARGS__)
+#define svmaxp_u16_m(...) __builtin_sve_svmaxp_u16_m(__VA_ARGS__)
+#define svmaxp_u8_x(...) __builtin_sve_svmaxp_u8_x(__VA_ARGS__)
+#define svmaxp_u32_x(...) __builtin_sve_svmaxp_u32_x(__VA_ARGS__)
+#define svmaxp_u64_x(...) __builtin_sve_svmaxp_u64_x(__VA_ARGS__)
+#define svmaxp_u16_x(...) __builtin_sve_svmaxp_u16_x(__VA_ARGS__)
+#define svminnmp_f64_m(...) __builtin_sve_svminnmp_f64_m(__VA_ARGS__)
+#define svminnmp_f32_m(...) __builtin_sve_svminnmp_f32_m(__VA_ARGS__)
+#define svminnmp_f16_m(...) __builtin_sve_svminnmp_f16_m(__VA_ARGS__)
+#define svminnmp_f64_x(...) __builtin_sve_svminnmp_f64_x(__VA_ARGS__)
+#define svminnmp_f32_x(...) __builtin_sve_svminnmp_f32_x(__VA_ARGS__)
+#define svminnmp_f16_x(...) __builtin_sve_svminnmp_f16_x(__VA_ARGS__)
+#define svminp_f64_m(...) __builtin_sve_svminp_f64_m(__VA_ARGS__)
+#define svminp_f32_m(...) __builtin_sve_svminp_f32_m(__VA_ARGS__)
+#define svminp_f16_m(...) __builtin_sve_svminp_f16_m(__VA_ARGS__)
+#define svminp_f64_x(...) __builtin_sve_svminp_f64_x(__VA_ARGS__)
+#define svminp_f32_x(...) __builtin_sve_svminp_f32_x(__VA_ARGS__)
+#define svminp_f16_x(...) __builtin_sve_svminp_f16_x(__VA_ARGS__)
+#define svminp_s8_m(...) __builtin_sve_svminp_s8_m(__VA_ARGS__)
+#define svminp_s32_m(...) __builtin_sve_svminp_s32_m(__VA_ARGS__)
+#define svminp_s64_m(...) __builtin_sve_svminp_s64_m(__VA_ARGS__)
+#define svminp_s16_m(...) __builtin_sve_svminp_s16_m(__VA_ARGS__)
+#define svminp_s8_x(...) __builtin_sve_svminp_s8_x(__VA_ARGS__)
+#define svminp_s32_x(...) __builtin_sve_svminp_s32_x(__VA_ARGS__)
+#define svminp_s64_x(...) __builtin_sve_svminp_s64_x(__VA_ARGS__)
+#define svminp_s16_x(...) __builtin_sve_svminp_s16_x(__VA_ARGS__)
+#define svminp_u8_m(...) __builtin_sve_svminp_u8_m(__VA_ARGS__)
+#define svminp_u32_m(...) __builtin_sve_svminp_u32_m(__VA_ARGS__)
+#define svminp_u64_m(...) __builtin_sve_svminp_u64_m(__VA_ARGS__)
+#define svminp_u16_m(...) __builtin_sve_svminp_u16_m(__VA_ARGS__)
+#define svminp_u8_x(...) __builtin_sve_svminp_u8_x(__VA_ARGS__)
+#define svminp_u32_x(...) __builtin_sve_svminp_u32_x(__VA_ARGS__)
+#define svminp_u64_x(...) __builtin_sve_svminp_u64_x(__VA_ARGS__)
+#define svminp_u16_x(...) __builtin_sve_svminp_u16_x(__VA_ARGS__)
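+/*
+ * The trailing "p" marks the SVE2 pairwise forms: results from the two
+ * sources are interleaved, even lanes holding per-pair results from op1
+ * and odd lanes from op2; the "nm" variants use the IEEE maxNum/minNum
+ * rule, where a quiet NaN loses to a number.  Sketch:
+ *
+ *   // r[0] = max(a[0],a[1]), r[1] = max(b[0],b[1]), r[2] = max(a[2],a[3]), ...
+ *   svfloat32_t r = svmaxp_f32_m(pg, a, b);
+ */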
+#define svmla_lane_u32(...) __builtin_sve_svmla_lane_u32(__VA_ARGS__)
+#define svmla_lane_u64(...) __builtin_sve_svmla_lane_u64(__VA_ARGS__)
+#define svmla_lane_u16(...) __builtin_sve_svmla_lane_u16(__VA_ARGS__)
+#define svmla_lane_s32(...) __builtin_sve_svmla_lane_s32(__VA_ARGS__)
+#define svmla_lane_s64(...) __builtin_sve_svmla_lane_s64(__VA_ARGS__)
+#define svmla_lane_s16(...) __builtin_sve_svmla_lane_s16(__VA_ARGS__)
+#define svmlalb_n_f32(...) __builtin_sve_svmlalb_n_f32(__VA_ARGS__)
+#define svmlalb_n_s32(...) __builtin_sve_svmlalb_n_s32(__VA_ARGS__)
+#define svmlalb_n_s64(...) __builtin_sve_svmlalb_n_s64(__VA_ARGS__)
+#define svmlalb_n_s16(...) __builtin_sve_svmlalb_n_s16(__VA_ARGS__)
+#define svmlalb_n_u32(...) __builtin_sve_svmlalb_n_u32(__VA_ARGS__)
+#define svmlalb_n_u64(...) __builtin_sve_svmlalb_n_u64(__VA_ARGS__)
+#define svmlalb_n_u16(...) __builtin_sve_svmlalb_n_u16(__VA_ARGS__)
+#define svmlalb_f32(...) __builtin_sve_svmlalb_f32(__VA_ARGS__)
+#define svmlalb_s32(...) __builtin_sve_svmlalb_s32(__VA_ARGS__)
+#define svmlalb_s64(...) __builtin_sve_svmlalb_s64(__VA_ARGS__)
+#define svmlalb_s16(...) __builtin_sve_svmlalb_s16(__VA_ARGS__)
+#define svmlalb_u32(...) __builtin_sve_svmlalb_u32(__VA_ARGS__)
+#define svmlalb_u64(...) __builtin_sve_svmlalb_u64(__VA_ARGS__)
+#define svmlalb_u16(...) __builtin_sve_svmlalb_u16(__VA_ARGS__)
+#define svmlalb_lane_f32(...) __builtin_sve_svmlalb_lane_f32(__VA_ARGS__)
+#define svmlalb_lane_s32(...) __builtin_sve_svmlalb_lane_s32(__VA_ARGS__)
+#define svmlalb_lane_s64(...) __builtin_sve_svmlalb_lane_s64(__VA_ARGS__)
+#define svmlalb_lane_u32(...) __builtin_sve_svmlalb_lane_u32(__VA_ARGS__)
+#define svmlalb_lane_u64(...) __builtin_sve_svmlalb_lane_u64(__VA_ARGS__)
+#define svmlalt_n_f32(...) __builtin_sve_svmlalt_n_f32(__VA_ARGS__)
+#define svmlalt_n_s32(...) __builtin_sve_svmlalt_n_s32(__VA_ARGS__)
+#define svmlalt_n_s64(...) __builtin_sve_svmlalt_n_s64(__VA_ARGS__)
+#define svmlalt_n_s16(...) __builtin_sve_svmlalt_n_s16(__VA_ARGS__)
+#define svmlalt_n_u32(...) __builtin_sve_svmlalt_n_u32(__VA_ARGS__)
+#define svmlalt_n_u64(...) __builtin_sve_svmlalt_n_u64(__VA_ARGS__)
+#define svmlalt_n_u16(...) __builtin_sve_svmlalt_n_u16(__VA_ARGS__)
+#define svmlalt_f32(...) __builtin_sve_svmlalt_f32(__VA_ARGS__)
+#define svmlalt_s32(...) __builtin_sve_svmlalt_s32(__VA_ARGS__)
+#define svmlalt_s64(...) __builtin_sve_svmlalt_s64(__VA_ARGS__)
+#define svmlalt_s16(...) __builtin_sve_svmlalt_s16(__VA_ARGS__)
+#define svmlalt_u32(...) __builtin_sve_svmlalt_u32(__VA_ARGS__)
+#define svmlalt_u64(...) __builtin_sve_svmlalt_u64(__VA_ARGS__)
+#define svmlalt_u16(...) __builtin_sve_svmlalt_u16(__VA_ARGS__)
+#define svmlalt_lane_f32(...) __builtin_sve_svmlalt_lane_f32(__VA_ARGS__)
+#define svmlalt_lane_s32(...) __builtin_sve_svmlalt_lane_s32(__VA_ARGS__)
+#define svmlalt_lane_s64(...) __builtin_sve_svmlalt_lane_s64(__VA_ARGS__)
+#define svmlalt_lane_u32(...) __builtin_sve_svmlalt_lane_u32(__VA_ARGS__)
+#define svmlalt_lane_u64(...) __builtin_sve_svmlalt_lane_u64(__VA_ARGS__)
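+/*
+ * The b/t suffixes follow the SVE2 bottom/top convention: "b" widens the
+ * even-numbered (bottom) lanes of each narrow source, "t" the odd (top)
+ * lanes, so a b/t pair together consumes every lane.  A widening
+ * multiply-accumulate sketch, with a and b as svint32_t:
+ *
+ *   acc = svmlalb_s64(acc, a, b);   // acc (s64 lanes) += a_even * b_even
+ *   acc = svmlalt_s64(acc, a, b);   // then += a_odd * b_odd
+ */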
+#define svmls_lane_u32(...) __builtin_sve_svmls_lane_u32(__VA_ARGS__)
+#define svmls_lane_u64(...) __builtin_sve_svmls_lane_u64(__VA_ARGS__)
+#define svmls_lane_u16(...) __builtin_sve_svmls_lane_u16(__VA_ARGS__)
+#define svmls_lane_s32(...) __builtin_sve_svmls_lane_s32(__VA_ARGS__)
+#define svmls_lane_s64(...) __builtin_sve_svmls_lane_s64(__VA_ARGS__)
+#define svmls_lane_s16(...) __builtin_sve_svmls_lane_s16(__VA_ARGS__)
+#define svmlslb_n_f32(...) __builtin_sve_svmlslb_n_f32(__VA_ARGS__)
+#define svmlslb_n_s32(...) __builtin_sve_svmlslb_n_s32(__VA_ARGS__)
+#define svmlslb_n_s64(...) __builtin_sve_svmlslb_n_s64(__VA_ARGS__)
+#define svmlslb_n_s16(...) __builtin_sve_svmlslb_n_s16(__VA_ARGS__)
+#define svmlslb_n_u32(...) __builtin_sve_svmlslb_n_u32(__VA_ARGS__)
+#define svmlslb_n_u64(...) __builtin_sve_svmlslb_n_u64(__VA_ARGS__)
+#define svmlslb_n_u16(...) __builtin_sve_svmlslb_n_u16(__VA_ARGS__)
+#define svmlslb_f32(...) __builtin_sve_svmlslb_f32(__VA_ARGS__)
+#define svmlslb_s32(...) __builtin_sve_svmlslb_s32(__VA_ARGS__)
+#define svmlslb_s64(...) __builtin_sve_svmlslb_s64(__VA_ARGS__)
+#define svmlslb_s16(...) __builtin_sve_svmlslb_s16(__VA_ARGS__)
+#define svmlslb_u32(...) __builtin_sve_svmlslb_u32(__VA_ARGS__)
+#define svmlslb_u64(...) __builtin_sve_svmlslb_u64(__VA_ARGS__)
+#define svmlslb_u16(...) __builtin_sve_svmlslb_u16(__VA_ARGS__)
+#define svmlslb_lane_f32(...) __builtin_sve_svmlslb_lane_f32(__VA_ARGS__)
+#define svmlslb_lane_s32(...) __builtin_sve_svmlslb_lane_s32(__VA_ARGS__)
+#define svmlslb_lane_s64(...) __builtin_sve_svmlslb_lane_s64(__VA_ARGS__)
+#define svmlslb_lane_u32(...) __builtin_sve_svmlslb_lane_u32(__VA_ARGS__)
+#define svmlslb_lane_u64(...) __builtin_sve_svmlslb_lane_u64(__VA_ARGS__)
+#define svmlslt_n_f32(...) __builtin_sve_svmlslt_n_f32(__VA_ARGS__)
+#define svmlslt_n_s32(...) __builtin_sve_svmlslt_n_s32(__VA_ARGS__)
+#define svmlslt_n_s64(...) __builtin_sve_svmlslt_n_s64(__VA_ARGS__)
+#define svmlslt_n_s16(...) __builtin_sve_svmlslt_n_s16(__VA_ARGS__)
+#define svmlslt_n_u32(...) __builtin_sve_svmlslt_n_u32(__VA_ARGS__)
+#define svmlslt_n_u64(...) __builtin_sve_svmlslt_n_u64(__VA_ARGS__)
+#define svmlslt_n_u16(...) __builtin_sve_svmlslt_n_u16(__VA_ARGS__)
+#define svmlslt_f32(...) __builtin_sve_svmlslt_f32(__VA_ARGS__)
+#define svmlslt_s32(...) __builtin_sve_svmlslt_s32(__VA_ARGS__)
+#define svmlslt_s64(...) __builtin_sve_svmlslt_s64(__VA_ARGS__)
+#define svmlslt_s16(...) __builtin_sve_svmlslt_s16(__VA_ARGS__)
+#define svmlslt_u32(...) __builtin_sve_svmlslt_u32(__VA_ARGS__)
+#define svmlslt_u64(...) __builtin_sve_svmlslt_u64(__VA_ARGS__)
+#define svmlslt_u16(...) __builtin_sve_svmlslt_u16(__VA_ARGS__)
+#define svmlslt_lane_f32(...) __builtin_sve_svmlslt_lane_f32(__VA_ARGS__)
+#define svmlslt_lane_s32(...) __builtin_sve_svmlslt_lane_s32(__VA_ARGS__)
+#define svmlslt_lane_s64(...) __builtin_sve_svmlslt_lane_s64(__VA_ARGS__)
+#define svmlslt_lane_u32(...) __builtin_sve_svmlslt_lane_u32(__VA_ARGS__)
+#define svmlslt_lane_u64(...) __builtin_sve_svmlslt_lane_u64(__VA_ARGS__)
+#define svmovlb_s32(...) __builtin_sve_svmovlb_s32(__VA_ARGS__)
+#define svmovlb_s64(...) __builtin_sve_svmovlb_s64(__VA_ARGS__)
+#define svmovlb_s16(...) __builtin_sve_svmovlb_s16(__VA_ARGS__)
+#define svmovlb_u32(...) __builtin_sve_svmovlb_u32(__VA_ARGS__)
+#define svmovlb_u64(...) __builtin_sve_svmovlb_u64(__VA_ARGS__)
+#define svmovlb_u16(...) __builtin_sve_svmovlb_u16(__VA_ARGS__)
+#define svmovlt_s32(...) __builtin_sve_svmovlt_s32(__VA_ARGS__)
+#define svmovlt_s64(...) __builtin_sve_svmovlt_s64(__VA_ARGS__)
+#define svmovlt_s16(...) __builtin_sve_svmovlt_s16(__VA_ARGS__)
+#define svmovlt_u32(...) __builtin_sve_svmovlt_u32(__VA_ARGS__)
+#define svmovlt_u64(...) __builtin_sve_svmovlt_u64(__VA_ARGS__)
+#define svmovlt_u16(...) __builtin_sve_svmovlt_u16(__VA_ARGS__)
+#define svmul_lane_u32(...) __builtin_sve_svmul_lane_u32(__VA_ARGS__)
+#define svmul_lane_u64(...) __builtin_sve_svmul_lane_u64(__VA_ARGS__)
+#define svmul_lane_u16(...) __builtin_sve_svmul_lane_u16(__VA_ARGS__)
+#define svmul_lane_s32(...) __builtin_sve_svmul_lane_s32(__VA_ARGS__)
+#define svmul_lane_s64(...) __builtin_sve_svmul_lane_s64(__VA_ARGS__)
+#define svmul_lane_s16(...) __builtin_sve_svmul_lane_s16(__VA_ARGS__)
+#define svmullb_n_s32(...) __builtin_sve_svmullb_n_s32(__VA_ARGS__)
+#define svmullb_n_s64(...) __builtin_sve_svmullb_n_s64(__VA_ARGS__)
+#define svmullb_n_s16(...) __builtin_sve_svmullb_n_s16(__VA_ARGS__)
+#define svmullb_n_u32(...) __builtin_sve_svmullb_n_u32(__VA_ARGS__)
+#define svmullb_n_u64(...) __builtin_sve_svmullb_n_u64(__VA_ARGS__)
+#define svmullb_n_u16(...) __builtin_sve_svmullb_n_u16(__VA_ARGS__)
+#define svmullb_s32(...) __builtin_sve_svmullb_s32(__VA_ARGS__)
+#define svmullb_s64(...) __builtin_sve_svmullb_s64(__VA_ARGS__)
+#define svmullb_s16(...) __builtin_sve_svmullb_s16(__VA_ARGS__)
+#define svmullb_u32(...) __builtin_sve_svmullb_u32(__VA_ARGS__)
+#define svmullb_u64(...) __builtin_sve_svmullb_u64(__VA_ARGS__)
+#define svmullb_u16(...) __builtin_sve_svmullb_u16(__VA_ARGS__)
+#define svmullb_lane_s32(...) __builtin_sve_svmullb_lane_s32(__VA_ARGS__)
+#define svmullb_lane_s64(...) __builtin_sve_svmullb_lane_s64(__VA_ARGS__)
+#define svmullb_lane_u32(...) __builtin_sve_svmullb_lane_u32(__VA_ARGS__)
+#define svmullb_lane_u64(...) __builtin_sve_svmullb_lane_u64(__VA_ARGS__)
+#define svmullt_n_s32(...) __builtin_sve_svmullt_n_s32(__VA_ARGS__)
+#define svmullt_n_s64(...) __builtin_sve_svmullt_n_s64(__VA_ARGS__)
+#define svmullt_n_s16(...) __builtin_sve_svmullt_n_s16(__VA_ARGS__)
+#define svmullt_n_u32(...) __builtin_sve_svmullt_n_u32(__VA_ARGS__)
+#define svmullt_n_u64(...) __builtin_sve_svmullt_n_u64(__VA_ARGS__)
+#define svmullt_n_u16(...) __builtin_sve_svmullt_n_u16(__VA_ARGS__)
+#define svmullt_s32(...) __builtin_sve_svmullt_s32(__VA_ARGS__)
+#define svmullt_s64(...) __builtin_sve_svmullt_s64(__VA_ARGS__)
+#define svmullt_s16(...) __builtin_sve_svmullt_s16(__VA_ARGS__)
+#define svmullt_u32(...) __builtin_sve_svmullt_u32(__VA_ARGS__)
+#define svmullt_u64(...) __builtin_sve_svmullt_u64(__VA_ARGS__)
+#define svmullt_u16(...) __builtin_sve_svmullt_u16(__VA_ARGS__)
+#define svmullt_lane_s32(...) __builtin_sve_svmullt_lane_s32(__VA_ARGS__)
+#define svmullt_lane_s64(...) __builtin_sve_svmullt_lane_s64(__VA_ARGS__)
+#define svmullt_lane_u32(...) __builtin_sve_svmullt_lane_u32(__VA_ARGS__)
+#define svmullt_lane_u64(...) __builtin_sve_svmullt_lane_u64(__VA_ARGS__)
+#define svnbsl_n_u8(...) __builtin_sve_svnbsl_n_u8(__VA_ARGS__)
+#define svnbsl_n_u32(...) __builtin_sve_svnbsl_n_u32(__VA_ARGS__)
+#define svnbsl_n_u64(...) __builtin_sve_svnbsl_n_u64(__VA_ARGS__)
+#define svnbsl_n_u16(...) __builtin_sve_svnbsl_n_u16(__VA_ARGS__)
+#define svnbsl_n_s8(...) __builtin_sve_svnbsl_n_s8(__VA_ARGS__)
+#define svnbsl_n_s32(...) __builtin_sve_svnbsl_n_s32(__VA_ARGS__)
+#define svnbsl_n_s64(...) __builtin_sve_svnbsl_n_s64(__VA_ARGS__)
+#define svnbsl_n_s16(...) __builtin_sve_svnbsl_n_s16(__VA_ARGS__)
+#define svnbsl_u8(...) __builtin_sve_svnbsl_u8(__VA_ARGS__)
+#define svnbsl_u32(...) __builtin_sve_svnbsl_u32(__VA_ARGS__)
+#define svnbsl_u64(...) __builtin_sve_svnbsl_u64(__VA_ARGS__)
+#define svnbsl_u16(...) __builtin_sve_svnbsl_u16(__VA_ARGS__)
+#define svnbsl_s8(...) __builtin_sve_svnbsl_s8(__VA_ARGS__)
+#define svnbsl_s32(...) __builtin_sve_svnbsl_s32(__VA_ARGS__)
+#define svnbsl_s64(...) __builtin_sve_svnbsl_s64(__VA_ARGS__)
+#define svnbsl_s16(...) __builtin_sve_svnbsl_s16(__VA_ARGS__)
+#define svnmatch_u8(...) __builtin_sve_svnmatch_u8(__VA_ARGS__)
+#define svnmatch_u16(...) __builtin_sve_svnmatch_u16(__VA_ARGS__)
+#define svnmatch_s8(...) __builtin_sve_svnmatch_s8(__VA_ARGS__)
+#define svnmatch_s16(...) __builtin_sve_svnmatch_s16(__VA_ARGS__)
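+/*
+ * svmatch/svnmatch are the SVE2 character-match ops: for each active lane
+ * of op1, the result predicate is true iff that element does (resp. does
+ * not) equal some element of the corresponding 128-bit segment of op2 --
+ * the building block for strchr/strcspn-style scans.  Sketch:
+ *
+ *   svbool_t hit = svmatch_u8(pg, text, delimiters);
+ */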
+#define svpmul_n_u8(...) __builtin_sve_svpmul_n_u8(__VA_ARGS__)
+#define svpmul_u8(...) __builtin_sve_svpmul_u8(__VA_ARGS__)
+#define svpmullb_n_u64(...) __builtin_sve_svpmullb_n_u64(__VA_ARGS__)
+#define svpmullb_n_u16(...) __builtin_sve_svpmullb_n_u16(__VA_ARGS__)
+#define svpmullb_u64(...) __builtin_sve_svpmullb_u64(__VA_ARGS__)
+#define svpmullb_u16(...) __builtin_sve_svpmullb_u16(__VA_ARGS__)
+#define svpmullb_pair_n_u8(...) __builtin_sve_svpmullb_pair_n_u8(__VA_ARGS__)
+#define svpmullb_pair_n_u32(...) __builtin_sve_svpmullb_pair_n_u32(__VA_ARGS__)
+#define svpmullb_pair_u8(...) __builtin_sve_svpmullb_pair_u8(__VA_ARGS__)
+#define svpmullb_pair_u32(...) __builtin_sve_svpmullb_pair_u32(__VA_ARGS__)
+#define svpmullt_n_u64(...) __builtin_sve_svpmullt_n_u64(__VA_ARGS__)
+#define svpmullt_n_u16(...) __builtin_sve_svpmullt_n_u16(__VA_ARGS__)
+#define svpmullt_u64(...) __builtin_sve_svpmullt_u64(__VA_ARGS__)
+#define svpmullt_u16(...) __builtin_sve_svpmullt_u16(__VA_ARGS__)
+#define svpmullt_pair_n_u8(...) __builtin_sve_svpmullt_pair_n_u8(__VA_ARGS__)
+#define svpmullt_pair_n_u32(...) __builtin_sve_svpmullt_pair_n_u32(__VA_ARGS__)
+#define svpmullt_pair_u8(...) __builtin_sve_svpmullt_pair_u8(__VA_ARGS__)
+#define svpmullt_pair_u32(...) __builtin_sve_svpmullt_pair_u32(__VA_ARGS__)
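+/*
+ * The pmul family is carry-less (polynomial, GF(2)) multiplication, the
+ * primitive behind CRC and GHASH-style kernels.  The b/t forms widen the
+ * bottom/top lanes as elsewhere in this header; the _pair forms keep the
+ * element width and spread each product across an adjacent lane pair
+ * (see the ACLE for the exact lane layout).
+ */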
+#define svqabs_s8_m(...) __builtin_sve_svqabs_s8_m(__VA_ARGS__)
+#define svqabs_s32_m(...) __builtin_sve_svqabs_s32_m(__VA_ARGS__)
+#define svqabs_s64_m(...) __builtin_sve_svqabs_s64_m(__VA_ARGS__)
+#define svqabs_s16_m(...) __builtin_sve_svqabs_s16_m(__VA_ARGS__)
+#define svqabs_s8_x(...) __builtin_sve_svqabs_s8_x(__VA_ARGS__)
+#define svqabs_s32_x(...) __builtin_sve_svqabs_s32_x(__VA_ARGS__)
+#define svqabs_s64_x(...) __builtin_sve_svqabs_s64_x(__VA_ARGS__)
+#define svqabs_s16_x(...) __builtin_sve_svqabs_s16_x(__VA_ARGS__)
+#define svqabs_s8_z(...) __builtin_sve_svqabs_s8_z(__VA_ARGS__)
+#define svqabs_s32_z(...) __builtin_sve_svqabs_s32_z(__VA_ARGS__)
+#define svqabs_s64_z(...) __builtin_sve_svqabs_s64_z(__VA_ARGS__)
+#define svqabs_s16_z(...) __builtin_sve_svqabs_s16_z(__VA_ARGS__)
+#define svqadd_n_s8_m(...) __builtin_sve_svqadd_n_s8_m(__VA_ARGS__)
+#define svqadd_n_s32_m(...) __builtin_sve_svqadd_n_s32_m(__VA_ARGS__)
+#define svqadd_n_s64_m(...) __builtin_sve_svqadd_n_s64_m(__VA_ARGS__)
+#define svqadd_n_s16_m(...) __builtin_sve_svqadd_n_s16_m(__VA_ARGS__)
+#define svqadd_n_s8_x(...) __builtin_sve_svqadd_n_s8_x(__VA_ARGS__)
+#define svqadd_n_s32_x(...) __builtin_sve_svqadd_n_s32_x(__VA_ARGS__)
+#define svqadd_n_s64_x(...) __builtin_sve_svqadd_n_s64_x(__VA_ARGS__)
+#define svqadd_n_s16_x(...) __builtin_sve_svqadd_n_s16_x(__VA_ARGS__)
+#define svqadd_n_s8_z(...) __builtin_sve_svqadd_n_s8_z(__VA_ARGS__)
+#define svqadd_n_s32_z(...) __builtin_sve_svqadd_n_s32_z(__VA_ARGS__)
+#define svqadd_n_s64_z(...) __builtin_sve_svqadd_n_s64_z(__VA_ARGS__)
+#define svqadd_n_s16_z(...) __builtin_sve_svqadd_n_s16_z(__VA_ARGS__)
+#define svqadd_n_u8_m(...) __builtin_sve_svqadd_n_u8_m(__VA_ARGS__)
+#define svqadd_n_u32_m(...) __builtin_sve_svqadd_n_u32_m(__VA_ARGS__)
+#define svqadd_n_u64_m(...) __builtin_sve_svqadd_n_u64_m(__VA_ARGS__)
+#define svqadd_n_u16_m(...) __builtin_sve_svqadd_n_u16_m(__VA_ARGS__)
+#define svqadd_n_u8_x(...) __builtin_sve_svqadd_n_u8_x(__VA_ARGS__)
+#define svqadd_n_u32_x(...) __builtin_sve_svqadd_n_u32_x(__VA_ARGS__)
+#define svqadd_n_u64_x(...) __builtin_sve_svqadd_n_u64_x(__VA_ARGS__)
+#define svqadd_n_u16_x(...) __builtin_sve_svqadd_n_u16_x(__VA_ARGS__)
+#define svqadd_n_u8_z(...) __builtin_sve_svqadd_n_u8_z(__VA_ARGS__)
+#define svqadd_n_u32_z(...) __builtin_sve_svqadd_n_u32_z(__VA_ARGS__)
+#define svqadd_n_u64_z(...) __builtin_sve_svqadd_n_u64_z(__VA_ARGS__)
+#define svqadd_n_u16_z(...) __builtin_sve_svqadd_n_u16_z(__VA_ARGS__)
+#define svqadd_s8_m(...) __builtin_sve_svqadd_s8_m(__VA_ARGS__)
+#define svqadd_s32_m(...) __builtin_sve_svqadd_s32_m(__VA_ARGS__)
+#define svqadd_s64_m(...) __builtin_sve_svqadd_s64_m(__VA_ARGS__)
+#define svqadd_s16_m(...) __builtin_sve_svqadd_s16_m(__VA_ARGS__)
+#define svqadd_s8_x(...) __builtin_sve_svqadd_s8_x(__VA_ARGS__)
+#define svqadd_s32_x(...) __builtin_sve_svqadd_s32_x(__VA_ARGS__)
+#define svqadd_s64_x(...) __builtin_sve_svqadd_s64_x(__VA_ARGS__)
+#define svqadd_s16_x(...) __builtin_sve_svqadd_s16_x(__VA_ARGS__)
+#define svqadd_s8_z(...) __builtin_sve_svqadd_s8_z(__VA_ARGS__)
+#define svqadd_s32_z(...) __builtin_sve_svqadd_s32_z(__VA_ARGS__)
+#define svqadd_s64_z(...) __builtin_sve_svqadd_s64_z(__VA_ARGS__)
+#define svqadd_s16_z(...) __builtin_sve_svqadd_s16_z(__VA_ARGS__)
+#define svqadd_u8_m(...) __builtin_sve_svqadd_u8_m(__VA_ARGS__)
+#define svqadd_u32_m(...) __builtin_sve_svqadd_u32_m(__VA_ARGS__)
+#define svqadd_u64_m(...) __builtin_sve_svqadd_u64_m(__VA_ARGS__)
+#define svqadd_u16_m(...) __builtin_sve_svqadd_u16_m(__VA_ARGS__)
+#define svqadd_u8_x(...) __builtin_sve_svqadd_u8_x(__VA_ARGS__)
+#define svqadd_u32_x(...) __builtin_sve_svqadd_u32_x(__VA_ARGS__)
+#define svqadd_u64_x(...) __builtin_sve_svqadd_u64_x(__VA_ARGS__)
+#define svqadd_u16_x(...) __builtin_sve_svqadd_u16_x(__VA_ARGS__)
+#define svqadd_u8_z(...) __builtin_sve_svqadd_u8_z(__VA_ARGS__)
+#define svqadd_u32_z(...) __builtin_sve_svqadd_u32_z(__VA_ARGS__)
+#define svqadd_u64_z(...) __builtin_sve_svqadd_u64_z(__VA_ARGS__)
+#define svqadd_u16_z(...) __builtin_sve_svqadd_u16_z(__VA_ARGS__)
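+/*
+ * A minimal strip-mined loop using the predicated saturating add above
+ * (illustrative only; assumes an SVE2 toolchain and the standard
+ * svcntw/svwhilelt/svld1/svst1 helpers from this header):
+ *
+ *   void sat_add(int32_t *dst, const int32_t *a, const int32_t *b, int64_t n) {
+ *     for (int64_t i = 0; i < n; i += svcntw()) {
+ *       svbool_t pg = svwhilelt_b32_s64(i, n);   // mask off the tail
+ *       svint32_t va = svld1_s32(pg, a + i);
+ *       svint32_t vb = svld1_s32(pg, b + i);
+ *       svst1_s32(pg, dst + i, svqadd_s32_m(pg, va, vb));  // lanes clamp, no wrap
+ *     }
+ *   }
+ */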
+#define svqcadd_s8(...) __builtin_sve_svqcadd_s8(__VA_ARGS__)
+#define svqcadd_s32(...) __builtin_sve_svqcadd_s32(__VA_ARGS__)
+#define svqcadd_s64(...) __builtin_sve_svqcadd_s64(__VA_ARGS__)
+#define svqcadd_s16(...) __builtin_sve_svqcadd_s16(__VA_ARGS__)
+#define svqdmlalb_n_s32(...) __builtin_sve_svqdmlalb_n_s32(__VA_ARGS__)
+#define svqdmlalb_n_s64(...) __builtin_sve_svqdmlalb_n_s64(__VA_ARGS__)
+#define svqdmlalb_n_s16(...) __builtin_sve_svqdmlalb_n_s16(__VA_ARGS__)
+#define svqdmlalb_s32(...) __builtin_sve_svqdmlalb_s32(__VA_ARGS__)
+#define svqdmlalb_s64(...) __builtin_sve_svqdmlalb_s64(__VA_ARGS__)
+#define svqdmlalb_s16(...) __builtin_sve_svqdmlalb_s16(__VA_ARGS__)
+#define svqdmlalb_lane_s32(...) __builtin_sve_svqdmlalb_lane_s32(__VA_ARGS__)
+#define svqdmlalb_lane_s64(...) __builtin_sve_svqdmlalb_lane_s64(__VA_ARGS__)
+#define svqdmlalbt_n_s32(...) __builtin_sve_svqdmlalbt_n_s32(__VA_ARGS__)
+#define svqdmlalbt_n_s64(...) __builtin_sve_svqdmlalbt_n_s64(__VA_ARGS__)
+#define svqdmlalbt_n_s16(...) __builtin_sve_svqdmlalbt_n_s16(__VA_ARGS__)
+#define svqdmlalbt_s32(...) __builtin_sve_svqdmlalbt_s32(__VA_ARGS__)
+#define svqdmlalbt_s64(...) __builtin_sve_svqdmlalbt_s64(__VA_ARGS__)
+#define svqdmlalbt_s16(...) __builtin_sve_svqdmlalbt_s16(__VA_ARGS__)
+#define svqdmlalt_n_s32(...) __builtin_sve_svqdmlalt_n_s32(__VA_ARGS__)
+#define svqdmlalt_n_s64(...) __builtin_sve_svqdmlalt_n_s64(__VA_ARGS__)
+#define svqdmlalt_n_s16(...) __builtin_sve_svqdmlalt_n_s16(__VA_ARGS__)
+#define svqdmlalt_s32(...) __builtin_sve_svqdmlalt_s32(__VA_ARGS__)
+#define svqdmlalt_s64(...) __builtin_sve_svqdmlalt_s64(__VA_ARGS__)
+#define svqdmlalt_s16(...) __builtin_sve_svqdmlalt_s16(__VA_ARGS__)
+#define svqdmlalt_lane_s32(...) __builtin_sve_svqdmlalt_lane_s32(__VA_ARGS__)
+#define svqdmlalt_lane_s64(...) __builtin_sve_svqdmlalt_lane_s64(__VA_ARGS__)
+#define svqdmlslb_n_s32(...) __builtin_sve_svqdmlslb_n_s32(__VA_ARGS__)
+#define svqdmlslb_n_s64(...) __builtin_sve_svqdmlslb_n_s64(__VA_ARGS__)
+#define svqdmlslb_n_s16(...) __builtin_sve_svqdmlslb_n_s16(__VA_ARGS__)
+#define svqdmlslb_s32(...) __builtin_sve_svqdmlslb_s32(__VA_ARGS__)
+#define svqdmlslb_s64(...) __builtin_sve_svqdmlslb_s64(__VA_ARGS__)
+#define svqdmlslb_s16(...) __builtin_sve_svqdmlslb_s16(__VA_ARGS__)
+#define svqdmlslb_lane_s32(...) __builtin_sve_svqdmlslb_lane_s32(__VA_ARGS__)
+#define svqdmlslb_lane_s64(...) __builtin_sve_svqdmlslb_lane_s64(__VA_ARGS__)
+#define svqdmlslbt_n_s32(...) __builtin_sve_svqdmlslbt_n_s32(__VA_ARGS__)
+#define svqdmlslbt_n_s64(...) __builtin_sve_svqdmlslbt_n_s64(__VA_ARGS__)
+#define svqdmlslbt_n_s16(...) __builtin_sve_svqdmlslbt_n_s16(__VA_ARGS__)
+#define svqdmlslbt_s32(...) __builtin_sve_svqdmlslbt_s32(__VA_ARGS__)
+#define svqdmlslbt_s64(...) __builtin_sve_svqdmlslbt_s64(__VA_ARGS__)
+#define svqdmlslbt_s16(...) __builtin_sve_svqdmlslbt_s16(__VA_ARGS__)
+#define svqdmlslt_n_s32(...) __builtin_sve_svqdmlslt_n_s32(__VA_ARGS__)
+#define svqdmlslt_n_s64(...) __builtin_sve_svqdmlslt_n_s64(__VA_ARGS__)
+#define svqdmlslt_n_s16(...) __builtin_sve_svqdmlslt_n_s16(__VA_ARGS__)
+#define svqdmlslt_s32(...) __builtin_sve_svqdmlslt_s32(__VA_ARGS__)
+#define svqdmlslt_s64(...) __builtin_sve_svqdmlslt_s64(__VA_ARGS__)
+#define svqdmlslt_s16(...) __builtin_sve_svqdmlslt_s16(__VA_ARGS__)
+#define svqdmlslt_lane_s32(...) __builtin_sve_svqdmlslt_lane_s32(__VA_ARGS__)
+#define svqdmlslt_lane_s64(...) __builtin_sve_svqdmlslt_lane_s64(__VA_ARGS__)
+#define svqdmulh_n_s8(...) __builtin_sve_svqdmulh_n_s8(__VA_ARGS__)
+#define svqdmulh_n_s32(...) __builtin_sve_svqdmulh_n_s32(__VA_ARGS__)
+#define svqdmulh_n_s64(...) __builtin_sve_svqdmulh_n_s64(__VA_ARGS__)
+#define svqdmulh_n_s16(...) __builtin_sve_svqdmulh_n_s16(__VA_ARGS__)
+#define svqdmulh_s8(...) __builtin_sve_svqdmulh_s8(__VA_ARGS__)
+#define svqdmulh_s32(...) __builtin_sve_svqdmulh_s32(__VA_ARGS__)
+#define svqdmulh_s64(...) __builtin_sve_svqdmulh_s64(__VA_ARGS__)
+#define svqdmulh_s16(...) __builtin_sve_svqdmulh_s16(__VA_ARGS__)
+#define svqdmulh_lane_s32(...) __builtin_sve_svqdmulh_lane_s32(__VA_ARGS__)
+#define svqdmulh_lane_s64(...) __builtin_sve_svqdmulh_lane_s64(__VA_ARGS__)
+#define svqdmulh_lane_s16(...) __builtin_sve_svqdmulh_lane_s16(__VA_ARGS__)
+#define svqdmullb_n_s32(...) __builtin_sve_svqdmullb_n_s32(__VA_ARGS__)
+#define svqdmullb_n_s64(...) __builtin_sve_svqdmullb_n_s64(__VA_ARGS__)
+#define svqdmullb_n_s16(...) __builtin_sve_svqdmullb_n_s16(__VA_ARGS__)
+#define svqdmullb_s32(...) __builtin_sve_svqdmullb_s32(__VA_ARGS__)
+#define svqdmullb_s64(...) __builtin_sve_svqdmullb_s64(__VA_ARGS__)
+#define svqdmullb_s16(...) __builtin_sve_svqdmullb_s16(__VA_ARGS__)
+#define svqdmullb_lane_s32(...) __builtin_sve_svqdmullb_lane_s32(__VA_ARGS__)
+#define svqdmullb_lane_s64(...) __builtin_sve_svqdmullb_lane_s64(__VA_ARGS__)
+#define svqdmullt_n_s32(...) __builtin_sve_svqdmullt_n_s32(__VA_ARGS__)
+#define svqdmullt_n_s64(...) __builtin_sve_svqdmullt_n_s64(__VA_ARGS__)
+#define svqdmullt_n_s16(...) __builtin_sve_svqdmullt_n_s16(__VA_ARGS__)
+#define svqdmullt_s32(...) __builtin_sve_svqdmullt_s32(__VA_ARGS__)
+#define svqdmullt_s64(...) __builtin_sve_svqdmullt_s64(__VA_ARGS__)
+#define svqdmullt_s16(...) __builtin_sve_svqdmullt_s16(__VA_ARGS__)
+#define svqdmullt_lane_s32(...) __builtin_sve_svqdmullt_lane_s32(__VA_ARGS__)
+#define svqdmullt_lane_s64(...) __builtin_sve_svqdmullt_lane_s64(__VA_ARGS__)
+#define svqneg_s8_m(...) __builtin_sve_svqneg_s8_m(__VA_ARGS__)
+#define svqneg_s32_m(...) __builtin_sve_svqneg_s32_m(__VA_ARGS__)
+#define svqneg_s64_m(...) __builtin_sve_svqneg_s64_m(__VA_ARGS__)
+#define svqneg_s16_m(...) __builtin_sve_svqneg_s16_m(__VA_ARGS__)
+#define svqneg_s8_x(...) __builtin_sve_svqneg_s8_x(__VA_ARGS__)
+#define svqneg_s32_x(...) __builtin_sve_svqneg_s32_x(__VA_ARGS__)
+#define svqneg_s64_x(...) __builtin_sve_svqneg_s64_x(__VA_ARGS__)
+#define svqneg_s16_x(...) __builtin_sve_svqneg_s16_x(__VA_ARGS__)
+#define svqneg_s8_z(...) __builtin_sve_svqneg_s8_z(__VA_ARGS__)
+#define svqneg_s32_z(...) __builtin_sve_svqneg_s32_z(__VA_ARGS__)
+#define svqneg_s64_z(...) __builtin_sve_svqneg_s64_z(__VA_ARGS__)
+#define svqneg_s16_z(...) __builtin_sve_svqneg_s16_z(__VA_ARGS__)
+#define svqrdcmlah_s8(...) __builtin_sve_svqrdcmlah_s8(__VA_ARGS__)
+#define svqrdcmlah_s32(...) __builtin_sve_svqrdcmlah_s32(__VA_ARGS__)
+#define svqrdcmlah_s64(...) __builtin_sve_svqrdcmlah_s64(__VA_ARGS__)
+#define svqrdcmlah_s16(...) __builtin_sve_svqrdcmlah_s16(__VA_ARGS__)
+#define svqrdcmlah_lane_s32(...) __builtin_sve_svqrdcmlah_lane_s32(__VA_ARGS__)
+#define svqrdcmlah_lane_s16(...) __builtin_sve_svqrdcmlah_lane_s16(__VA_ARGS__)
+#define svqrdmlah_n_s8(...) __builtin_sve_svqrdmlah_n_s8(__VA_ARGS__)
+#define svqrdmlah_n_s32(...) __builtin_sve_svqrdmlah_n_s32(__VA_ARGS__)
+#define svqrdmlah_n_s64(...) __builtin_sve_svqrdmlah_n_s64(__VA_ARGS__)
+#define svqrdmlah_n_s16(...) __builtin_sve_svqrdmlah_n_s16(__VA_ARGS__)
+#define svqrdmlah_s8(...) __builtin_sve_svqrdmlah_s8(__VA_ARGS__)
+#define svqrdmlah_s32(...) __builtin_sve_svqrdmlah_s32(__VA_ARGS__)
+#define svqrdmlah_s64(...) __builtin_sve_svqrdmlah_s64(__VA_ARGS__)
+#define svqrdmlah_s16(...) __builtin_sve_svqrdmlah_s16(__VA_ARGS__)
+#define svqrdmlah_lane_s32(...) __builtin_sve_svqrdmlah_lane_s32(__VA_ARGS__)
+#define svqrdmlah_lane_s64(...) __builtin_sve_svqrdmlah_lane_s64(__VA_ARGS__)
+#define svqrdmlah_lane_s16(...) __builtin_sve_svqrdmlah_lane_s16(__VA_ARGS__)
+#define svqrdmlsh_n_s8(...) __builtin_sve_svqrdmlsh_n_s8(__VA_ARGS__)
+#define svqrdmlsh_n_s32(...) __builtin_sve_svqrdmlsh_n_s32(__VA_ARGS__)
+#define svqrdmlsh_n_s64(...) __builtin_sve_svqrdmlsh_n_s64(__VA_ARGS__)
+#define svqrdmlsh_n_s16(...) __builtin_sve_svqrdmlsh_n_s16(__VA_ARGS__)
+#define svqrdmlsh_s8(...) __builtin_sve_svqrdmlsh_s8(__VA_ARGS__)
+#define svqrdmlsh_s32(...) __builtin_sve_svqrdmlsh_s32(__VA_ARGS__)
+#define svqrdmlsh_s64(...) __builtin_sve_svqrdmlsh_s64(__VA_ARGS__)
+#define svqrdmlsh_s16(...) __builtin_sve_svqrdmlsh_s16(__VA_ARGS__)
+#define svqrdmlsh_lane_s32(...) __builtin_sve_svqrdmlsh_lane_s32(__VA_ARGS__)
+#define svqrdmlsh_lane_s64(...) __builtin_sve_svqrdmlsh_lane_s64(__VA_ARGS__)
+#define svqrdmlsh_lane_s16(...) __builtin_sve_svqrdmlsh_lane_s16(__VA_ARGS__)
+#define svqrdmulh_n_s8(...) __builtin_sve_svqrdmulh_n_s8(__VA_ARGS__)
+#define svqrdmulh_n_s32(...) __builtin_sve_svqrdmulh_n_s32(__VA_ARGS__)
+#define svqrdmulh_n_s64(...) __builtin_sve_svqrdmulh_n_s64(__VA_ARGS__)
+#define svqrdmulh_n_s16(...) __builtin_sve_svqrdmulh_n_s16(__VA_ARGS__)
+#define svqrdmulh_s8(...) __builtin_sve_svqrdmulh_s8(__VA_ARGS__)
+#define svqrdmulh_s32(...) __builtin_sve_svqrdmulh_s32(__VA_ARGS__)
+#define svqrdmulh_s64(...) __builtin_sve_svqrdmulh_s64(__VA_ARGS__)
+#define svqrdmulh_s16(...) __builtin_sve_svqrdmulh_s16(__VA_ARGS__)
+#define svqrdmulh_lane_s32(...) __builtin_sve_svqrdmulh_lane_s32(__VA_ARGS__)
+#define svqrdmulh_lane_s64(...) __builtin_sve_svqrdmulh_lane_s64(__VA_ARGS__)
+#define svqrdmulh_lane_s16(...) __builtin_sve_svqrdmulh_lane_s16(__VA_ARGS__)
+#define svqrshl_n_s8_m(...) __builtin_sve_svqrshl_n_s8_m(__VA_ARGS__)
+#define svqrshl_n_s32_m(...) __builtin_sve_svqrshl_n_s32_m(__VA_ARGS__)
+#define svqrshl_n_s64_m(...) __builtin_sve_svqrshl_n_s64_m(__VA_ARGS__)
+#define svqrshl_n_s16_m(...) __builtin_sve_svqrshl_n_s16_m(__VA_ARGS__)
+#define svqrshl_n_s8_x(...) __builtin_sve_svqrshl_n_s8_x(__VA_ARGS__)
+#define svqrshl_n_s32_x(...) __builtin_sve_svqrshl_n_s32_x(__VA_ARGS__)
+#define svqrshl_n_s64_x(...) __builtin_sve_svqrshl_n_s64_x(__VA_ARGS__)
+#define svqrshl_n_s16_x(...) __builtin_sve_svqrshl_n_s16_x(__VA_ARGS__)
+#define svqrshl_n_s8_z(...) __builtin_sve_svqrshl_n_s8_z(__VA_ARGS__)
+#define svqrshl_n_s32_z(...) __builtin_sve_svqrshl_n_s32_z(__VA_ARGS__)
+#define svqrshl_n_s64_z(...) __builtin_sve_svqrshl_n_s64_z(__VA_ARGS__)
+#define svqrshl_n_s16_z(...) __builtin_sve_svqrshl_n_s16_z(__VA_ARGS__)
+#define svqrshl_n_u8_m(...) __builtin_sve_svqrshl_n_u8_m(__VA_ARGS__)
+#define svqrshl_n_u32_m(...) __builtin_sve_svqrshl_n_u32_m(__VA_ARGS__)
+#define svqrshl_n_u64_m(...) __builtin_sve_svqrshl_n_u64_m(__VA_ARGS__)
+#define svqrshl_n_u16_m(...) __builtin_sve_svqrshl_n_u16_m(__VA_ARGS__)
+#define svqrshl_n_u8_x(...) __builtin_sve_svqrshl_n_u8_x(__VA_ARGS__)
+#define svqrshl_n_u32_x(...) __builtin_sve_svqrshl_n_u32_x(__VA_ARGS__)
+#define svqrshl_n_u64_x(...) __builtin_sve_svqrshl_n_u64_x(__VA_ARGS__)
+#define svqrshl_n_u16_x(...) __builtin_sve_svqrshl_n_u16_x(__VA_ARGS__)
+#define svqrshl_n_u8_z(...) __builtin_sve_svqrshl_n_u8_z(__VA_ARGS__)
+#define svqrshl_n_u32_z(...) __builtin_sve_svqrshl_n_u32_z(__VA_ARGS__)
+#define svqrshl_n_u64_z(...) __builtin_sve_svqrshl_n_u64_z(__VA_ARGS__)
+#define svqrshl_n_u16_z(...) __builtin_sve_svqrshl_n_u16_z(__VA_ARGS__)
+#define svqrshl_s8_m(...) __builtin_sve_svqrshl_s8_m(__VA_ARGS__)
+#define svqrshl_s32_m(...) __builtin_sve_svqrshl_s32_m(__VA_ARGS__)
+#define svqrshl_s64_m(...) __builtin_sve_svqrshl_s64_m(__VA_ARGS__)
+#define svqrshl_s16_m(...) __builtin_sve_svqrshl_s16_m(__VA_ARGS__)
+#define svqrshl_s8_x(...) __builtin_sve_svqrshl_s8_x(__VA_ARGS__)
+#define svqrshl_s32_x(...) __builtin_sve_svqrshl_s32_x(__VA_ARGS__)
+#define svqrshl_s64_x(...) __builtin_sve_svqrshl_s64_x(__VA_ARGS__)
+#define svqrshl_s16_x(...) __builtin_sve_svqrshl_s16_x(__VA_ARGS__)
+#define svqrshl_s8_z(...) __builtin_sve_svqrshl_s8_z(__VA_ARGS__)
+#define svqrshl_s32_z(...) __builtin_sve_svqrshl_s32_z(__VA_ARGS__)
+#define svqrshl_s64_z(...) __builtin_sve_svqrshl_s64_z(__VA_ARGS__)
+#define svqrshl_s16_z(...) __builtin_sve_svqrshl_s16_z(__VA_ARGS__)
+#define svqrshl_u8_m(...) __builtin_sve_svqrshl_u8_m(__VA_ARGS__)
+#define svqrshl_u32_m(...) __builtin_sve_svqrshl_u32_m(__VA_ARGS__)
+#define svqrshl_u64_m(...) __builtin_sve_svqrshl_u64_m(__VA_ARGS__)
+#define svqrshl_u16_m(...) __builtin_sve_svqrshl_u16_m(__VA_ARGS__)
+#define svqrshl_u8_x(...) __builtin_sve_svqrshl_u8_x(__VA_ARGS__)
+#define svqrshl_u32_x(...) __builtin_sve_svqrshl_u32_x(__VA_ARGS__)
+#define svqrshl_u64_x(...) __builtin_sve_svqrshl_u64_x(__VA_ARGS__)
+#define svqrshl_u16_x(...) __builtin_sve_svqrshl_u16_x(__VA_ARGS__)
+#define svqrshl_u8_z(...) __builtin_sve_svqrshl_u8_z(__VA_ARGS__)
+#define svqrshl_u32_z(...) __builtin_sve_svqrshl_u32_z(__VA_ARGS__)
+#define svqrshl_u64_z(...) __builtin_sve_svqrshl_u64_z(__VA_ARGS__)
+#define svqrshl_u16_z(...) __builtin_sve_svqrshl_u16_z(__VA_ARGS__)
+#define svqrshrnb_n_s32(...) __builtin_sve_svqrshrnb_n_s32(__VA_ARGS__)
+#define svqrshrnb_n_s64(...) __builtin_sve_svqrshrnb_n_s64(__VA_ARGS__)
+#define svqrshrnb_n_s16(...) __builtin_sve_svqrshrnb_n_s16(__VA_ARGS__)
+#define svqrshrnb_n_u32(...) __builtin_sve_svqrshrnb_n_u32(__VA_ARGS__)
+#define svqrshrnb_n_u64(...) __builtin_sve_svqrshrnb_n_u64(__VA_ARGS__)
+#define svqrshrnb_n_u16(...) __builtin_sve_svqrshrnb_n_u16(__VA_ARGS__)
+#define svqrshrnt_n_s32(...) __builtin_sve_svqrshrnt_n_s32(__VA_ARGS__)
+#define svqrshrnt_n_s64(...) __builtin_sve_svqrshrnt_n_s64(__VA_ARGS__)
+#define svqrshrnt_n_s16(...) __builtin_sve_svqrshrnt_n_s16(__VA_ARGS__)
+#define svqrshrnt_n_u32(...) __builtin_sve_svqrshrnt_n_u32(__VA_ARGS__)
+#define svqrshrnt_n_u64(...) __builtin_sve_svqrshrnt_n_u64(__VA_ARGS__)
+#define svqrshrnt_n_u16(...) __builtin_sve_svqrshrnt_n_u16(__VA_ARGS__)
+#define svqrshrunb_n_s32(...) __builtin_sve_svqrshrunb_n_s32(__VA_ARGS__)
+#define svqrshrunb_n_s64(...) __builtin_sve_svqrshrunb_n_s64(__VA_ARGS__)
+#define svqrshrunb_n_s16(...) __builtin_sve_svqrshrunb_n_s16(__VA_ARGS__)
+#define svqrshrunt_n_s32(...) __builtin_sve_svqrshrunt_n_s32(__VA_ARGS__)
+#define svqrshrunt_n_s64(...) __builtin_sve_svqrshrunt_n_s64(__VA_ARGS__)
+#define svqrshrunt_n_s16(...) __builtin_sve_svqrshrunt_n_s16(__VA_ARGS__)
+#define svqshl_n_s8_m(...) __builtin_sve_svqshl_n_s8_m(__VA_ARGS__)
+#define svqshl_n_s32_m(...) __builtin_sve_svqshl_n_s32_m(__VA_ARGS__)
+#define svqshl_n_s64_m(...) __builtin_sve_svqshl_n_s64_m(__VA_ARGS__)
+#define svqshl_n_s16_m(...) __builtin_sve_svqshl_n_s16_m(__VA_ARGS__)
+#define svqshl_n_s8_x(...) __builtin_sve_svqshl_n_s8_x(__VA_ARGS__)
+#define svqshl_n_s32_x(...) __builtin_sve_svqshl_n_s32_x(__VA_ARGS__)
+#define svqshl_n_s64_x(...) __builtin_sve_svqshl_n_s64_x(__VA_ARGS__)
+#define svqshl_n_s16_x(...) __builtin_sve_svqshl_n_s16_x(__VA_ARGS__)
+#define svqshl_n_s8_z(...) __builtin_sve_svqshl_n_s8_z(__VA_ARGS__)
+#define svqshl_n_s32_z(...) __builtin_sve_svqshl_n_s32_z(__VA_ARGS__)
+#define svqshl_n_s64_z(...) __builtin_sve_svqshl_n_s64_z(__VA_ARGS__)
+#define svqshl_n_s16_z(...) __builtin_sve_svqshl_n_s16_z(__VA_ARGS__)
+#define svqshl_n_u8_m(...) __builtin_sve_svqshl_n_u8_m(__VA_ARGS__)
+#define svqshl_n_u32_m(...) __builtin_sve_svqshl_n_u32_m(__VA_ARGS__)
+#define svqshl_n_u64_m(...) __builtin_sve_svqshl_n_u64_m(__VA_ARGS__)
+#define svqshl_n_u16_m(...) __builtin_sve_svqshl_n_u16_m(__VA_ARGS__)
+#define svqshl_n_u8_x(...) __builtin_sve_svqshl_n_u8_x(__VA_ARGS__)
+#define svqshl_n_u32_x(...) __builtin_sve_svqshl_n_u32_x(__VA_ARGS__)
+#define svqshl_n_u64_x(...) __builtin_sve_svqshl_n_u64_x(__VA_ARGS__)
+#define svqshl_n_u16_x(...) __builtin_sve_svqshl_n_u16_x(__VA_ARGS__)
+#define svqshl_n_u8_z(...) __builtin_sve_svqshl_n_u8_z(__VA_ARGS__)
+#define svqshl_n_u32_z(...) __builtin_sve_svqshl_n_u32_z(__VA_ARGS__)
+#define svqshl_n_u64_z(...) __builtin_sve_svqshl_n_u64_z(__VA_ARGS__)
+#define svqshl_n_u16_z(...) __builtin_sve_svqshl_n_u16_z(__VA_ARGS__)
+#define svqshl_s8_m(...) __builtin_sve_svqshl_s8_m(__VA_ARGS__)
+#define svqshl_s32_m(...) __builtin_sve_svqshl_s32_m(__VA_ARGS__)
+#define svqshl_s64_m(...) __builtin_sve_svqshl_s64_m(__VA_ARGS__)
+#define svqshl_s16_m(...) __builtin_sve_svqshl_s16_m(__VA_ARGS__)
+#define svqshl_s8_x(...) __builtin_sve_svqshl_s8_x(__VA_ARGS__)
+#define svqshl_s32_x(...) __builtin_sve_svqshl_s32_x(__VA_ARGS__)
+#define svqshl_s64_x(...) __builtin_sve_svqshl_s64_x(__VA_ARGS__)
+#define svqshl_s16_x(...) __builtin_sve_svqshl_s16_x(__VA_ARGS__)
+#define svqshl_s8_z(...) __builtin_sve_svqshl_s8_z(__VA_ARGS__)
+#define svqshl_s32_z(...) __builtin_sve_svqshl_s32_z(__VA_ARGS__)
+#define svqshl_s64_z(...) __builtin_sve_svqshl_s64_z(__VA_ARGS__)
+#define svqshl_s16_z(...) __builtin_sve_svqshl_s16_z(__VA_ARGS__)
+#define svqshl_u8_m(...) __builtin_sve_svqshl_u8_m(__VA_ARGS__)
+#define svqshl_u32_m(...) __builtin_sve_svqshl_u32_m(__VA_ARGS__)
+#define svqshl_u64_m(...) __builtin_sve_svqshl_u64_m(__VA_ARGS__)
+#define svqshl_u16_m(...) __builtin_sve_svqshl_u16_m(__VA_ARGS__)
+#define svqshl_u8_x(...) __builtin_sve_svqshl_u8_x(__VA_ARGS__)
+#define svqshl_u32_x(...) __builtin_sve_svqshl_u32_x(__VA_ARGS__)
+#define svqshl_u64_x(...) __builtin_sve_svqshl_u64_x(__VA_ARGS__)
+#define svqshl_u16_x(...) __builtin_sve_svqshl_u16_x(__VA_ARGS__)
+#define svqshl_u8_z(...) __builtin_sve_svqshl_u8_z(__VA_ARGS__)
+#define svqshl_u32_z(...) __builtin_sve_svqshl_u32_z(__VA_ARGS__)
+#define svqshl_u64_z(...) __builtin_sve_svqshl_u64_z(__VA_ARGS__)
+#define svqshl_u16_z(...) __builtin_sve_svqshl_u16_z(__VA_ARGS__)
+#define svqshlu_n_s8_m(...) __builtin_sve_svqshlu_n_s8_m(__VA_ARGS__)
+#define svqshlu_n_s32_m(...) __builtin_sve_svqshlu_n_s32_m(__VA_ARGS__)
+#define svqshlu_n_s64_m(...) __builtin_sve_svqshlu_n_s64_m(__VA_ARGS__)
+#define svqshlu_n_s16_m(...) __builtin_sve_svqshlu_n_s16_m(__VA_ARGS__)
+#define svqshlu_n_s8_x(...) __builtin_sve_svqshlu_n_s8_x(__VA_ARGS__)
+#define svqshlu_n_s32_x(...) __builtin_sve_svqshlu_n_s32_x(__VA_ARGS__)
+#define svqshlu_n_s64_x(...) __builtin_sve_svqshlu_n_s64_x(__VA_ARGS__)
+#define svqshlu_n_s16_x(...) __builtin_sve_svqshlu_n_s16_x(__VA_ARGS__)
+#define svqshlu_n_s8_z(...) __builtin_sve_svqshlu_n_s8_z(__VA_ARGS__)
+#define svqshlu_n_s32_z(...) __builtin_sve_svqshlu_n_s32_z(__VA_ARGS__)
+#define svqshlu_n_s64_z(...) __builtin_sve_svqshlu_n_s64_z(__VA_ARGS__)
+#define svqshlu_n_s16_z(...) __builtin_sve_svqshlu_n_s16_z(__VA_ARGS__)
+#define svqshrnb_n_s32(...) __builtin_sve_svqshrnb_n_s32(__VA_ARGS__)
+#define svqshrnb_n_s64(...) __builtin_sve_svqshrnb_n_s64(__VA_ARGS__)
+#define svqshrnb_n_s16(...) __builtin_sve_svqshrnb_n_s16(__VA_ARGS__)
+#define svqshrnb_n_u32(...) __builtin_sve_svqshrnb_n_u32(__VA_ARGS__)
+#define svqshrnb_n_u64(...) __builtin_sve_svqshrnb_n_u64(__VA_ARGS__)
+#define svqshrnb_n_u16(...) __builtin_sve_svqshrnb_n_u16(__VA_ARGS__)
+#define svqshrnt_n_s32(...) __builtin_sve_svqshrnt_n_s32(__VA_ARGS__)
+#define svqshrnt_n_s64(...) __builtin_sve_svqshrnt_n_s64(__VA_ARGS__)
+#define svqshrnt_n_s16(...) __builtin_sve_svqshrnt_n_s16(__VA_ARGS__)
+#define svqshrnt_n_u32(...) __builtin_sve_svqshrnt_n_u32(__VA_ARGS__)
+#define svqshrnt_n_u64(...) __builtin_sve_svqshrnt_n_u64(__VA_ARGS__)
+#define svqshrnt_n_u16(...) __builtin_sve_svqshrnt_n_u16(__VA_ARGS__)
+#define svqshrunb_n_s32(...) __builtin_sve_svqshrunb_n_s32(__VA_ARGS__)
+#define svqshrunb_n_s64(...) __builtin_sve_svqshrunb_n_s64(__VA_ARGS__)
+#define svqshrunb_n_s16(...) __builtin_sve_svqshrunb_n_s16(__VA_ARGS__)
+#define svqshrunt_n_s32(...) __builtin_sve_svqshrunt_n_s32(__VA_ARGS__)
+#define svqshrunt_n_s64(...) __builtin_sve_svqshrunt_n_s64(__VA_ARGS__)
+#define svqshrunt_n_s16(...) __builtin_sve_svqshrunt_n_s16(__VA_ARGS__)
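+/*
+ * Shift-family decoder ring (ACLE convention): q = saturating, r =
+ * rounding, shl/shr = shift left/right, n = narrowing to the next
+ * narrower element, b/t = write the bottom/top half-lanes, and a "u"
+ * after the shift (qshlu, qshrun, qrshrun) means a signed input yielding
+ * an unsigned saturated result.  Sketch:
+ *
+ *   // even u16 lanes = sat_u16((s32 lanes + (1 << 7)) >> 8)
+ *   svuint16_t lo = svqrshrunb_n_s32(v, 8);
+ */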
+#define svqsub_n_s8_m(...) __builtin_sve_svqsub_n_s8_m(__VA_ARGS__)
+#define svqsub_n_s32_m(...) __builtin_sve_svqsub_n_s32_m(__VA_ARGS__)
+#define svqsub_n_s64_m(...) __builtin_sve_svqsub_n_s64_m(__VA_ARGS__)
+#define svqsub_n_s16_m(...) __builtin_sve_svqsub_n_s16_m(__VA_ARGS__)
+#define svqsub_n_s8_x(...) __builtin_sve_svqsub_n_s8_x(__VA_ARGS__)
+#define svqsub_n_s32_x(...) __builtin_sve_svqsub_n_s32_x(__VA_ARGS__)
+#define svqsub_n_s64_x(...) __builtin_sve_svqsub_n_s64_x(__VA_ARGS__)
+#define svqsub_n_s16_x(...) __builtin_sve_svqsub_n_s16_x(__VA_ARGS__)
+#define svqsub_n_s8_z(...) __builtin_sve_svqsub_n_s8_z(__VA_ARGS__)
+#define svqsub_n_s32_z(...) __builtin_sve_svqsub_n_s32_z(__VA_ARGS__)
+#define svqsub_n_s64_z(...) __builtin_sve_svqsub_n_s64_z(__VA_ARGS__)
+#define svqsub_n_s16_z(...) __builtin_sve_svqsub_n_s16_z(__VA_ARGS__)
+#define svqsub_n_u8_m(...) __builtin_sve_svqsub_n_u8_m(__VA_ARGS__)
+#define svqsub_n_u32_m(...) __builtin_sve_svqsub_n_u32_m(__VA_ARGS__)
+#define svqsub_n_u64_m(...) __builtin_sve_svqsub_n_u64_m(__VA_ARGS__)
+#define svqsub_n_u16_m(...) __builtin_sve_svqsub_n_u16_m(__VA_ARGS__)
+#define svqsub_n_u8_x(...) __builtin_sve_svqsub_n_u8_x(__VA_ARGS__)
+#define svqsub_n_u32_x(...) __builtin_sve_svqsub_n_u32_x(__VA_ARGS__)
+#define svqsub_n_u64_x(...) __builtin_sve_svqsub_n_u64_x(__VA_ARGS__)
+#define svqsub_n_u16_x(...) __builtin_sve_svqsub_n_u16_x(__VA_ARGS__)
+#define svqsub_n_u8_z(...) __builtin_sve_svqsub_n_u8_z(__VA_ARGS__)
+#define svqsub_n_u32_z(...) __builtin_sve_svqsub_n_u32_z(__VA_ARGS__)
+#define svqsub_n_u64_z(...) __builtin_sve_svqsub_n_u64_z(__VA_ARGS__)
+#define svqsub_n_u16_z(...) __builtin_sve_svqsub_n_u16_z(__VA_ARGS__)
+#define svqsub_s8_m(...) __builtin_sve_svqsub_s8_m(__VA_ARGS__)
+#define svqsub_s32_m(...) __builtin_sve_svqsub_s32_m(__VA_ARGS__)
+#define svqsub_s64_m(...) __builtin_sve_svqsub_s64_m(__VA_ARGS__)
+#define svqsub_s16_m(...) __builtin_sve_svqsub_s16_m(__VA_ARGS__)
+#define svqsub_s8_x(...) __builtin_sve_svqsub_s8_x(__VA_ARGS__)
+#define svqsub_s32_x(...) __builtin_sve_svqsub_s32_x(__VA_ARGS__)
+#define svqsub_s64_x(...) __builtin_sve_svqsub_s64_x(__VA_ARGS__)
+#define svqsub_s16_x(...) __builtin_sve_svqsub_s16_x(__VA_ARGS__)
+#define svqsub_s8_z(...) __builtin_sve_svqsub_s8_z(__VA_ARGS__)
+#define svqsub_s32_z(...) __builtin_sve_svqsub_s32_z(__VA_ARGS__)
+#define svqsub_s64_z(...) __builtin_sve_svqsub_s64_z(__VA_ARGS__)
+#define svqsub_s16_z(...) __builtin_sve_svqsub_s16_z(__VA_ARGS__)
+#define svqsub_u8_m(...) __builtin_sve_svqsub_u8_m(__VA_ARGS__)
+#define svqsub_u32_m(...) __builtin_sve_svqsub_u32_m(__VA_ARGS__)
+#define svqsub_u64_m(...) __builtin_sve_svqsub_u64_m(__VA_ARGS__)
+#define svqsub_u16_m(...) __builtin_sve_svqsub_u16_m(__VA_ARGS__)
+#define svqsub_u8_x(...) __builtin_sve_svqsub_u8_x(__VA_ARGS__)
+#define svqsub_u32_x(...) __builtin_sve_svqsub_u32_x(__VA_ARGS__)
+#define svqsub_u64_x(...) __builtin_sve_svqsub_u64_x(__VA_ARGS__)
+#define svqsub_u16_x(...) __builtin_sve_svqsub_u16_x(__VA_ARGS__)
+#define svqsub_u8_z(...) __builtin_sve_svqsub_u8_z(__VA_ARGS__)
+#define svqsub_u32_z(...) __builtin_sve_svqsub_u32_z(__VA_ARGS__)
+#define svqsub_u64_z(...) __builtin_sve_svqsub_u64_z(__VA_ARGS__)
+#define svqsub_u16_z(...) __builtin_sve_svqsub_u16_z(__VA_ARGS__)
+#define svqsubr_n_s8_m(...) __builtin_sve_svqsubr_n_s8_m(__VA_ARGS__)
+#define svqsubr_n_s32_m(...) __builtin_sve_svqsubr_n_s32_m(__VA_ARGS__)
+#define svqsubr_n_s64_m(...) __builtin_sve_svqsubr_n_s64_m(__VA_ARGS__)
+#define svqsubr_n_s16_m(...) __builtin_sve_svqsubr_n_s16_m(__VA_ARGS__)
+#define svqsubr_n_s8_x(...) __builtin_sve_svqsubr_n_s8_x(__VA_ARGS__)
+#define svqsubr_n_s32_x(...) __builtin_sve_svqsubr_n_s32_x(__VA_ARGS__)
+#define svqsubr_n_s64_x(...) __builtin_sve_svqsubr_n_s64_x(__VA_ARGS__)
+#define svqsubr_n_s16_x(...) __builtin_sve_svqsubr_n_s16_x(__VA_ARGS__)
+#define svqsubr_n_s8_z(...) __builtin_sve_svqsubr_n_s8_z(__VA_ARGS__)
+#define svqsubr_n_s32_z(...) __builtin_sve_svqsubr_n_s32_z(__VA_ARGS__)
+#define svqsubr_n_s64_z(...) __builtin_sve_svqsubr_n_s64_z(__VA_ARGS__)
+#define svqsubr_n_s16_z(...) __builtin_sve_svqsubr_n_s16_z(__VA_ARGS__)
+#define svqsubr_n_u8_m(...) __builtin_sve_svqsubr_n_u8_m(__VA_ARGS__)
+#define svqsubr_n_u32_m(...) __builtin_sve_svqsubr_n_u32_m(__VA_ARGS__)
+#define svqsubr_n_u64_m(...) __builtin_sve_svqsubr_n_u64_m(__VA_ARGS__)
+#define svqsubr_n_u16_m(...) __builtin_sve_svqsubr_n_u16_m(__VA_ARGS__)
+#define svqsubr_n_u8_x(...) __builtin_sve_svqsubr_n_u8_x(__VA_ARGS__)
+#define svqsubr_n_u32_x(...) __builtin_sve_svqsubr_n_u32_x(__VA_ARGS__)
+#define svqsubr_n_u64_x(...) __builtin_sve_svqsubr_n_u64_x(__VA_ARGS__)
+#define svqsubr_n_u16_x(...) __builtin_sve_svqsubr_n_u16_x(__VA_ARGS__)
+#define svqsubr_n_u8_z(...) __builtin_sve_svqsubr_n_u8_z(__VA_ARGS__)
+#define svqsubr_n_u32_z(...) __builtin_sve_svqsubr_n_u32_z(__VA_ARGS__)
+#define svqsubr_n_u64_z(...) __builtin_sve_svqsubr_n_u64_z(__VA_ARGS__)
+#define svqsubr_n_u16_z(...) __builtin_sve_svqsubr_n_u16_z(__VA_ARGS__)
+#define svqsubr_s8_m(...) __builtin_sve_svqsubr_s8_m(__VA_ARGS__)
+#define svqsubr_s32_m(...) __builtin_sve_svqsubr_s32_m(__VA_ARGS__)
+#define svqsubr_s64_m(...) __builtin_sve_svqsubr_s64_m(__VA_ARGS__)
+#define svqsubr_s16_m(...) __builtin_sve_svqsubr_s16_m(__VA_ARGS__)
+#define svqsubr_s8_x(...) __builtin_sve_svqsubr_s8_x(__VA_ARGS__)
+#define svqsubr_s32_x(...) __builtin_sve_svqsubr_s32_x(__VA_ARGS__)
+#define svqsubr_s64_x(...) __builtin_sve_svqsubr_s64_x(__VA_ARGS__)
+#define svqsubr_s16_x(...) __builtin_sve_svqsubr_s16_x(__VA_ARGS__)
+#define svqsubr_s8_z(...) __builtin_sve_svqsubr_s8_z(__VA_ARGS__)
+#define svqsubr_s32_z(...) __builtin_sve_svqsubr_s32_z(__VA_ARGS__)
+#define svqsubr_s64_z(...) __builtin_sve_svqsubr_s64_z(__VA_ARGS__)
+#define svqsubr_s16_z(...) __builtin_sve_svqsubr_s16_z(__VA_ARGS__)
+#define svqsubr_u8_m(...) __builtin_sve_svqsubr_u8_m(__VA_ARGS__)
+#define svqsubr_u32_m(...) __builtin_sve_svqsubr_u32_m(__VA_ARGS__)
+#define svqsubr_u64_m(...) __builtin_sve_svqsubr_u64_m(__VA_ARGS__)
+#define svqsubr_u16_m(...) __builtin_sve_svqsubr_u16_m(__VA_ARGS__)
+#define svqsubr_u8_x(...) __builtin_sve_svqsubr_u8_x(__VA_ARGS__)
+#define svqsubr_u32_x(...) __builtin_sve_svqsubr_u32_x(__VA_ARGS__)
+#define svqsubr_u64_x(...) __builtin_sve_svqsubr_u64_x(__VA_ARGS__)
+#define svqsubr_u16_x(...) __builtin_sve_svqsubr_u16_x(__VA_ARGS__)
+#define svqsubr_u8_z(...) __builtin_sve_svqsubr_u8_z(__VA_ARGS__)
+#define svqsubr_u32_z(...) __builtin_sve_svqsubr_u32_z(__VA_ARGS__)
+#define svqsubr_u64_z(...) __builtin_sve_svqsubr_u64_z(__VA_ARGS__)
+#define svqsubr_u16_z(...) __builtin_sve_svqsubr_u16_z(__VA_ARGS__)
+#define svqxtnb_s32(...) __builtin_sve_svqxtnb_s32(__VA_ARGS__)
+#define svqxtnb_s64(...) __builtin_sve_svqxtnb_s64(__VA_ARGS__)
+#define svqxtnb_s16(...) __builtin_sve_svqxtnb_s16(__VA_ARGS__)
+#define svqxtnb_u32(...) __builtin_sve_svqxtnb_u32(__VA_ARGS__)
+#define svqxtnb_u64(...) __builtin_sve_svqxtnb_u64(__VA_ARGS__)
+#define svqxtnb_u16(...) __builtin_sve_svqxtnb_u16(__VA_ARGS__)
+#define svqxtnt_s32(...) __builtin_sve_svqxtnt_s32(__VA_ARGS__)
+#define svqxtnt_s64(...) __builtin_sve_svqxtnt_s64(__VA_ARGS__)
+#define svqxtnt_s16(...) __builtin_sve_svqxtnt_s16(__VA_ARGS__)
+#define svqxtnt_u32(...) __builtin_sve_svqxtnt_u32(__VA_ARGS__)
+#define svqxtnt_u64(...) __builtin_sve_svqxtnt_u64(__VA_ARGS__)
+#define svqxtnt_u16(...) __builtin_sve_svqxtnt_u16(__VA_ARGS__)
+#define svqxtunb_s32(...) __builtin_sve_svqxtunb_s32(__VA_ARGS__)
+#define svqxtunb_s64(...) __builtin_sve_svqxtunb_s64(__VA_ARGS__)
+#define svqxtunb_s16(...) __builtin_sve_svqxtunb_s16(__VA_ARGS__)
+#define svqxtunt_s32(...) __builtin_sve_svqxtunt_s32(__VA_ARGS__)
+#define svqxtunt_s64(...) __builtin_sve_svqxtunt_s64(__VA_ARGS__)
+#define svqxtunt_s16(...) __builtin_sve_svqxtunt_s16(__VA_ARGS__)
+#define svraddhnb_n_u32(...) __builtin_sve_svraddhnb_n_u32(__VA_ARGS__)
+#define svraddhnb_n_u64(...) __builtin_sve_svraddhnb_n_u64(__VA_ARGS__)
+#define svraddhnb_n_u16(...) __builtin_sve_svraddhnb_n_u16(__VA_ARGS__)
+#define svraddhnb_n_s32(...) __builtin_sve_svraddhnb_n_s32(__VA_ARGS__)
+#define svraddhnb_n_s64(...) __builtin_sve_svraddhnb_n_s64(__VA_ARGS__)
+#define svraddhnb_n_s16(...) __builtin_sve_svraddhnb_n_s16(__VA_ARGS__)
+#define svraddhnb_u32(...) __builtin_sve_svraddhnb_u32(__VA_ARGS__)
+#define svraddhnb_u64(...) __builtin_sve_svraddhnb_u64(__VA_ARGS__)
+#define svraddhnb_u16(...) __builtin_sve_svraddhnb_u16(__VA_ARGS__)
+#define svraddhnb_s32(...) __builtin_sve_svraddhnb_s32(__VA_ARGS__)
+#define svraddhnb_s64(...) __builtin_sve_svraddhnb_s64(__VA_ARGS__)
+#define svraddhnb_s16(...) __builtin_sve_svraddhnb_s16(__VA_ARGS__)
+#define svraddhnt_n_u32(...) __builtin_sve_svraddhnt_n_u32(__VA_ARGS__)
+#define svraddhnt_n_u64(...) __builtin_sve_svraddhnt_n_u64(__VA_ARGS__)
+#define svraddhnt_n_u16(...) __builtin_sve_svraddhnt_n_u16(__VA_ARGS__)
+#define svraddhnt_n_s32(...) __builtin_sve_svraddhnt_n_s32(__VA_ARGS__)
+#define svraddhnt_n_s64(...) __builtin_sve_svraddhnt_n_s64(__VA_ARGS__)
+#define svraddhnt_n_s16(...) __builtin_sve_svraddhnt_n_s16(__VA_ARGS__)
+#define svraddhnt_u32(...) __builtin_sve_svraddhnt_u32(__VA_ARGS__)
+#define svraddhnt_u64(...) __builtin_sve_svraddhnt_u64(__VA_ARGS__)
+#define svraddhnt_u16(...) __builtin_sve_svraddhnt_u16(__VA_ARGS__)
+#define svraddhnt_s32(...) __builtin_sve_svraddhnt_s32(__VA_ARGS__)
+#define svraddhnt_s64(...) __builtin_sve_svraddhnt_s64(__VA_ARGS__)
+#define svraddhnt_s16(...) __builtin_sve_svraddhnt_s16(__VA_ARGS__)
+#define svrecpe_u32_m(...) __builtin_sve_svrecpe_u32_m(__VA_ARGS__)
+#define svrecpe_u32_x(...) __builtin_sve_svrecpe_u32_x(__VA_ARGS__)
+#define svrecpe_u32_z(...) __builtin_sve_svrecpe_u32_z(__VA_ARGS__)
+#define svrhadd_n_s8_m(...) __builtin_sve_svrhadd_n_s8_m(__VA_ARGS__)
+#define svrhadd_n_s32_m(...) __builtin_sve_svrhadd_n_s32_m(__VA_ARGS__)
+#define svrhadd_n_s64_m(...) __builtin_sve_svrhadd_n_s64_m(__VA_ARGS__)
+#define svrhadd_n_s16_m(...) __builtin_sve_svrhadd_n_s16_m(__VA_ARGS__)
+#define svrhadd_n_s8_x(...) __builtin_sve_svrhadd_n_s8_x(__VA_ARGS__)
+#define svrhadd_n_s32_x(...) __builtin_sve_svrhadd_n_s32_x(__VA_ARGS__)
+#define svrhadd_n_s64_x(...) __builtin_sve_svrhadd_n_s64_x(__VA_ARGS__)
+#define svrhadd_n_s16_x(...) __builtin_sve_svrhadd_n_s16_x(__VA_ARGS__)
+#define svrhadd_n_s8_z(...) __builtin_sve_svrhadd_n_s8_z(__VA_ARGS__)
+#define svrhadd_n_s32_z(...) __builtin_sve_svrhadd_n_s32_z(__VA_ARGS__)
+#define svrhadd_n_s64_z(...) __builtin_sve_svrhadd_n_s64_z(__VA_ARGS__)
+#define svrhadd_n_s16_z(...) __builtin_sve_svrhadd_n_s16_z(__VA_ARGS__)
+#define svrhadd_n_u8_m(...) __builtin_sve_svrhadd_n_u8_m(__VA_ARGS__)
+#define svrhadd_n_u32_m(...) __builtin_sve_svrhadd_n_u32_m(__VA_ARGS__)
+#define svrhadd_n_u64_m(...) __builtin_sve_svrhadd_n_u64_m(__VA_ARGS__)
+#define svrhadd_n_u16_m(...) __builtin_sve_svrhadd_n_u16_m(__VA_ARGS__)
+#define svrhadd_n_u8_x(...) __builtin_sve_svrhadd_n_u8_x(__VA_ARGS__)
+#define svrhadd_n_u32_x(...) __builtin_sve_svrhadd_n_u32_x(__VA_ARGS__)
+#define svrhadd_n_u64_x(...) __builtin_sve_svrhadd_n_u64_x(__VA_ARGS__)
+#define svrhadd_n_u16_x(...) __builtin_sve_svrhadd_n_u16_x(__VA_ARGS__)
+#define svrhadd_n_u8_z(...) __builtin_sve_svrhadd_n_u8_z(__VA_ARGS__)
+#define svrhadd_n_u32_z(...) __builtin_sve_svrhadd_n_u32_z(__VA_ARGS__)
+#define svrhadd_n_u64_z(...) __builtin_sve_svrhadd_n_u64_z(__VA_ARGS__)
+#define svrhadd_n_u16_z(...) __builtin_sve_svrhadd_n_u16_z(__VA_ARGS__)
+#define svrhadd_s8_m(...) __builtin_sve_svrhadd_s8_m(__VA_ARGS__)
+#define svrhadd_s32_m(...) __builtin_sve_svrhadd_s32_m(__VA_ARGS__)
+#define svrhadd_s64_m(...) __builtin_sve_svrhadd_s64_m(__VA_ARGS__)
+#define svrhadd_s16_m(...) __builtin_sve_svrhadd_s16_m(__VA_ARGS__)
+#define svrhadd_s8_x(...) __builtin_sve_svrhadd_s8_x(__VA_ARGS__)
+#define svrhadd_s32_x(...) __builtin_sve_svrhadd_s32_x(__VA_ARGS__)
+#define svrhadd_s64_x(...) __builtin_sve_svrhadd_s64_x(__VA_ARGS__)
+#define svrhadd_s16_x(...) __builtin_sve_svrhadd_s16_x(__VA_ARGS__)
+#define svrhadd_s8_z(...) __builtin_sve_svrhadd_s8_z(__VA_ARGS__)
+#define svrhadd_s32_z(...) __builtin_sve_svrhadd_s32_z(__VA_ARGS__)
+#define svrhadd_s64_z(...) __builtin_sve_svrhadd_s64_z(__VA_ARGS__)
+#define svrhadd_s16_z(...) __builtin_sve_svrhadd_s16_z(__VA_ARGS__)
+#define svrhadd_u8_m(...) __builtin_sve_svrhadd_u8_m(__VA_ARGS__)
+#define svrhadd_u32_m(...) __builtin_sve_svrhadd_u32_m(__VA_ARGS__)
+#define svrhadd_u64_m(...) __builtin_sve_svrhadd_u64_m(__VA_ARGS__)
+#define svrhadd_u16_m(...) __builtin_sve_svrhadd_u16_m(__VA_ARGS__)
+#define svrhadd_u8_x(...) __builtin_sve_svrhadd_u8_x(__VA_ARGS__)
+#define svrhadd_u32_x(...) __builtin_sve_svrhadd_u32_x(__VA_ARGS__)
+#define svrhadd_u64_x(...) __builtin_sve_svrhadd_u64_x(__VA_ARGS__)
+#define svrhadd_u16_x(...) __builtin_sve_svrhadd_u16_x(__VA_ARGS__)
+#define svrhadd_u8_z(...) __builtin_sve_svrhadd_u8_z(__VA_ARGS__)
+#define svrhadd_u32_z(...) __builtin_sve_svrhadd_u32_z(__VA_ARGS__)
+#define svrhadd_u64_z(...) __builtin_sve_svrhadd_u64_z(__VA_ARGS__)
+#define svrhadd_u16_z(...) __builtin_sve_svrhadd_u16_z(__VA_ARGS__)
+#define svrshl_n_s8_m(...) __builtin_sve_svrshl_n_s8_m(__VA_ARGS__)
+#define svrshl_n_s32_m(...) __builtin_sve_svrshl_n_s32_m(__VA_ARGS__)
+#define svrshl_n_s64_m(...) __builtin_sve_svrshl_n_s64_m(__VA_ARGS__)
+#define svrshl_n_s16_m(...) __builtin_sve_svrshl_n_s16_m(__VA_ARGS__)
+#define svrshl_n_s8_x(...) __builtin_sve_svrshl_n_s8_x(__VA_ARGS__)
+#define svrshl_n_s32_x(...) __builtin_sve_svrshl_n_s32_x(__VA_ARGS__)
+#define svrshl_n_s64_x(...) __builtin_sve_svrshl_n_s64_x(__VA_ARGS__)
+#define svrshl_n_s16_x(...) __builtin_sve_svrshl_n_s16_x(__VA_ARGS__)
+#define svrshl_n_s8_z(...) __builtin_sve_svrshl_n_s8_z(__VA_ARGS__)
+#define svrshl_n_s32_z(...) __builtin_sve_svrshl_n_s32_z(__VA_ARGS__)
+#define svrshl_n_s64_z(...) __builtin_sve_svrshl_n_s64_z(__VA_ARGS__)
+#define svrshl_n_s16_z(...) __builtin_sve_svrshl_n_s16_z(__VA_ARGS__)
+#define svrshl_n_u8_m(...) __builtin_sve_svrshl_n_u8_m(__VA_ARGS__)
+#define svrshl_n_u32_m(...) __builtin_sve_svrshl_n_u32_m(__VA_ARGS__)
+#define svrshl_n_u64_m(...) __builtin_sve_svrshl_n_u64_m(__VA_ARGS__)
+#define svrshl_n_u16_m(...) __builtin_sve_svrshl_n_u16_m(__VA_ARGS__)
+#define svrshl_n_u8_x(...) __builtin_sve_svrshl_n_u8_x(__VA_ARGS__)
+#define svrshl_n_u32_x(...) __builtin_sve_svrshl_n_u32_x(__VA_ARGS__)
+#define svrshl_n_u64_x(...) __builtin_sve_svrshl_n_u64_x(__VA_ARGS__)
+#define svrshl_n_u16_x(...) __builtin_sve_svrshl_n_u16_x(__VA_ARGS__)
+#define svrshl_n_u8_z(...) __builtin_sve_svrshl_n_u8_z(__VA_ARGS__)
+#define svrshl_n_u32_z(...) __builtin_sve_svrshl_n_u32_z(__VA_ARGS__)
+#define svrshl_n_u64_z(...) __builtin_sve_svrshl_n_u64_z(__VA_ARGS__)
+#define svrshl_n_u16_z(...) __builtin_sve_svrshl_n_u16_z(__VA_ARGS__)
+#define svrshl_s8_m(...) __builtin_sve_svrshl_s8_m(__VA_ARGS__)
+#define svrshl_s32_m(...) __builtin_sve_svrshl_s32_m(__VA_ARGS__)
+#define svrshl_s64_m(...) __builtin_sve_svrshl_s64_m(__VA_ARGS__)
+#define svrshl_s16_m(...) __builtin_sve_svrshl_s16_m(__VA_ARGS__)
+#define svrshl_s8_x(...) __builtin_sve_svrshl_s8_x(__VA_ARGS__)
+#define svrshl_s32_x(...) __builtin_sve_svrshl_s32_x(__VA_ARGS__)
+#define svrshl_s64_x(...) __builtin_sve_svrshl_s64_x(__VA_ARGS__)
+#define svrshl_s16_x(...) __builtin_sve_svrshl_s16_x(__VA_ARGS__)
+#define svrshl_s8_z(...) __builtin_sve_svrshl_s8_z(__VA_ARGS__)
+#define svrshl_s32_z(...) __builtin_sve_svrshl_s32_z(__VA_ARGS__)
+#define svrshl_s64_z(...) __builtin_sve_svrshl_s64_z(__VA_ARGS__)
+#define svrshl_s16_z(...) __builtin_sve_svrshl_s16_z(__VA_ARGS__)
+#define svrshl_u8_m(...) __builtin_sve_svrshl_u8_m(__VA_ARGS__)
+#define svrshl_u32_m(...) __builtin_sve_svrshl_u32_m(__VA_ARGS__)
+#define svrshl_u64_m(...) __builtin_sve_svrshl_u64_m(__VA_ARGS__)
+#define svrshl_u16_m(...) __builtin_sve_svrshl_u16_m(__VA_ARGS__)
+#define svrshl_u8_x(...) __builtin_sve_svrshl_u8_x(__VA_ARGS__)
+#define svrshl_u32_x(...) __builtin_sve_svrshl_u32_x(__VA_ARGS__)
+#define svrshl_u64_x(...) __builtin_sve_svrshl_u64_x(__VA_ARGS__)
+#define svrshl_u16_x(...) __builtin_sve_svrshl_u16_x(__VA_ARGS__)
+#define svrshl_u8_z(...) __builtin_sve_svrshl_u8_z(__VA_ARGS__)
+#define svrshl_u32_z(...) __builtin_sve_svrshl_u32_z(__VA_ARGS__)
+#define svrshl_u64_z(...) __builtin_sve_svrshl_u64_z(__VA_ARGS__)
+#define svrshl_u16_z(...) __builtin_sve_svrshl_u16_z(__VA_ARGS__)
+#define svrshr_n_s8_m(...) __builtin_sve_svrshr_n_s8_m(__VA_ARGS__)
+#define svrshr_n_s32_m(...) __builtin_sve_svrshr_n_s32_m(__VA_ARGS__)
+#define svrshr_n_s64_m(...) __builtin_sve_svrshr_n_s64_m(__VA_ARGS__)
+#define svrshr_n_s16_m(...) __builtin_sve_svrshr_n_s16_m(__VA_ARGS__)
+#define svrshr_n_u8_m(...) __builtin_sve_svrshr_n_u8_m(__VA_ARGS__)
+#define svrshr_n_u32_m(...) __builtin_sve_svrshr_n_u32_m(__VA_ARGS__)
+#define svrshr_n_u64_m(...) __builtin_sve_svrshr_n_u64_m(__VA_ARGS__)
+#define svrshr_n_u16_m(...) __builtin_sve_svrshr_n_u16_m(__VA_ARGS__)
+#define svrshr_n_s8_x(...) __builtin_sve_svrshr_n_s8_x(__VA_ARGS__)
+#define svrshr_n_s32_x(...) __builtin_sve_svrshr_n_s32_x(__VA_ARGS__)
+#define svrshr_n_s64_x(...) __builtin_sve_svrshr_n_s64_x(__VA_ARGS__)
+#define svrshr_n_s16_x(...) __builtin_sve_svrshr_n_s16_x(__VA_ARGS__)
+#define svrshr_n_u8_x(...) __builtin_sve_svrshr_n_u8_x(__VA_ARGS__)
+#define svrshr_n_u32_x(...) __builtin_sve_svrshr_n_u32_x(__VA_ARGS__)
+#define svrshr_n_u64_x(...) __builtin_sve_svrshr_n_u64_x(__VA_ARGS__)
+#define svrshr_n_u16_x(...) __builtin_sve_svrshr_n_u16_x(__VA_ARGS__)
+#define svrshr_n_s8_z(...) __builtin_sve_svrshr_n_s8_z(__VA_ARGS__)
+#define svrshr_n_s32_z(...) __builtin_sve_svrshr_n_s32_z(__VA_ARGS__)
+#define svrshr_n_s64_z(...) __builtin_sve_svrshr_n_s64_z(__VA_ARGS__)
+#define svrshr_n_s16_z(...) __builtin_sve_svrshr_n_s16_z(__VA_ARGS__)
+#define svrshr_n_u8_z(...) __builtin_sve_svrshr_n_u8_z(__VA_ARGS__)
+#define svrshr_n_u32_z(...) __builtin_sve_svrshr_n_u32_z(__VA_ARGS__)
+#define svrshr_n_u64_z(...) __builtin_sve_svrshr_n_u64_z(__VA_ARGS__)
+#define svrshr_n_u16_z(...) __builtin_sve_svrshr_n_u16_z(__VA_ARGS__)
+#define svrshrnb_n_u32(...) __builtin_sve_svrshrnb_n_u32(__VA_ARGS__)
+#define svrshrnb_n_u64(...) __builtin_sve_svrshrnb_n_u64(__VA_ARGS__)
+#define svrshrnb_n_u16(...) __builtin_sve_svrshrnb_n_u16(__VA_ARGS__)
+#define svrshrnb_n_s32(...) __builtin_sve_svrshrnb_n_s32(__VA_ARGS__)
+#define svrshrnb_n_s64(...) __builtin_sve_svrshrnb_n_s64(__VA_ARGS__)
+#define svrshrnb_n_s16(...) __builtin_sve_svrshrnb_n_s16(__VA_ARGS__)
+#define svrshrnt_n_u32(...) __builtin_sve_svrshrnt_n_u32(__VA_ARGS__)
+#define svrshrnt_n_u64(...) __builtin_sve_svrshrnt_n_u64(__VA_ARGS__)
+#define svrshrnt_n_u16(...) __builtin_sve_svrshrnt_n_u16(__VA_ARGS__)
+#define svrshrnt_n_s32(...) __builtin_sve_svrshrnt_n_s32(__VA_ARGS__)
+#define svrshrnt_n_s64(...) __builtin_sve_svrshrnt_n_s64(__VA_ARGS__)
+#define svrshrnt_n_s16(...) __builtin_sve_svrshrnt_n_s16(__VA_ARGS__)
+#define svrsqrte_u32_m(...) __builtin_sve_svrsqrte_u32_m(__VA_ARGS__)
+#define svrsqrte_u32_x(...) __builtin_sve_svrsqrte_u32_x(__VA_ARGS__)
+#define svrsqrte_u32_z(...) __builtin_sve_svrsqrte_u32_z(__VA_ARGS__)
+#define svrsra_n_s8(...) __builtin_sve_svrsra_n_s8(__VA_ARGS__)
+#define svrsra_n_s32(...) __builtin_sve_svrsra_n_s32(__VA_ARGS__)
+#define svrsra_n_s64(...) __builtin_sve_svrsra_n_s64(__VA_ARGS__)
+#define svrsra_n_s16(...) __builtin_sve_svrsra_n_s16(__VA_ARGS__)
+#define svrsra_n_u8(...) __builtin_sve_svrsra_n_u8(__VA_ARGS__)
+#define svrsra_n_u32(...) __builtin_sve_svrsra_n_u32(__VA_ARGS__)
+#define svrsra_n_u64(...) __builtin_sve_svrsra_n_u64(__VA_ARGS__)
+#define svrsra_n_u16(...) __builtin_sve_svrsra_n_u16(__VA_ARGS__)
+#define svrsubhnb_n_u32(...) __builtin_sve_svrsubhnb_n_u32(__VA_ARGS__)
+#define svrsubhnb_n_u64(...) __builtin_sve_svrsubhnb_n_u64(__VA_ARGS__)
+#define svrsubhnb_n_u16(...) __builtin_sve_svrsubhnb_n_u16(__VA_ARGS__)
+#define svrsubhnb_n_s32(...) __builtin_sve_svrsubhnb_n_s32(__VA_ARGS__)
+#define svrsubhnb_n_s64(...) __builtin_sve_svrsubhnb_n_s64(__VA_ARGS__)
+#define svrsubhnb_n_s16(...) __builtin_sve_svrsubhnb_n_s16(__VA_ARGS__)
+#define svrsubhnb_u32(...) __builtin_sve_svrsubhnb_u32(__VA_ARGS__)
+#define svrsubhnb_u64(...) __builtin_sve_svrsubhnb_u64(__VA_ARGS__)
+#define svrsubhnb_u16(...) __builtin_sve_svrsubhnb_u16(__VA_ARGS__)
+#define svrsubhnb_s32(...) __builtin_sve_svrsubhnb_s32(__VA_ARGS__)
+#define svrsubhnb_s64(...) __builtin_sve_svrsubhnb_s64(__VA_ARGS__)
+#define svrsubhnb_s16(...) __builtin_sve_svrsubhnb_s16(__VA_ARGS__)
+#define svrsubhnt_n_u32(...) __builtin_sve_svrsubhnt_n_u32(__VA_ARGS__)
+#define svrsubhnt_n_u64(...) __builtin_sve_svrsubhnt_n_u64(__VA_ARGS__)
+#define svrsubhnt_n_u16(...) __builtin_sve_svrsubhnt_n_u16(__VA_ARGS__)
+#define svrsubhnt_n_s32(...) __builtin_sve_svrsubhnt_n_s32(__VA_ARGS__)
+#define svrsubhnt_n_s64(...) __builtin_sve_svrsubhnt_n_s64(__VA_ARGS__)
+#define svrsubhnt_n_s16(...) __builtin_sve_svrsubhnt_n_s16(__VA_ARGS__)
+#define svrsubhnt_u32(...) __builtin_sve_svrsubhnt_u32(__VA_ARGS__)
+#define svrsubhnt_u64(...) __builtin_sve_svrsubhnt_u64(__VA_ARGS__)
+#define svrsubhnt_u16(...) __builtin_sve_svrsubhnt_u16(__VA_ARGS__)
+#define svrsubhnt_s32(...) __builtin_sve_svrsubhnt_s32(__VA_ARGS__)
+#define svrsubhnt_s64(...) __builtin_sve_svrsubhnt_s64(__VA_ARGS__)
+#define svrsubhnt_s16(...) __builtin_sve_svrsubhnt_s16(__VA_ARGS__)
+#define svsbclb_n_u32(...) __builtin_sve_svsbclb_n_u32(__VA_ARGS__)
+#define svsbclb_n_u64(...) __builtin_sve_svsbclb_n_u64(__VA_ARGS__)
+#define svsbclb_u32(...) __builtin_sve_svsbclb_u32(__VA_ARGS__)
+#define svsbclb_u64(...) __builtin_sve_svsbclb_u64(__VA_ARGS__)
+#define svsbclt_n_u32(...) __builtin_sve_svsbclt_n_u32(__VA_ARGS__)
+#define svsbclt_n_u64(...) __builtin_sve_svsbclt_n_u64(__VA_ARGS__)
+#define svsbclt_u32(...) __builtin_sve_svsbclt_u32(__VA_ARGS__)
+#define svsbclt_u64(...) __builtin_sve_svsbclt_u64(__VA_ARGS__)
+#define svshllb_n_s32(...) __builtin_sve_svshllb_n_s32(__VA_ARGS__)
+#define svshllb_n_s64(...) __builtin_sve_svshllb_n_s64(__VA_ARGS__)
+#define svshllb_n_s16(...) __builtin_sve_svshllb_n_s16(__VA_ARGS__)
+#define svshllb_n_u32(...) __builtin_sve_svshllb_n_u32(__VA_ARGS__)
+#define svshllb_n_u64(...) __builtin_sve_svshllb_n_u64(__VA_ARGS__)
+#define svshllb_n_u16(...) __builtin_sve_svshllb_n_u16(__VA_ARGS__)
+#define svshllt_n_s32(...) __builtin_sve_svshllt_n_s32(__VA_ARGS__)
+#define svshllt_n_s64(...) __builtin_sve_svshllt_n_s64(__VA_ARGS__)
+#define svshllt_n_s16(...) __builtin_sve_svshllt_n_s16(__VA_ARGS__)
+#define svshllt_n_u32(...) __builtin_sve_svshllt_n_u32(__VA_ARGS__)
+#define svshllt_n_u64(...) __builtin_sve_svshllt_n_u64(__VA_ARGS__)
+#define svshllt_n_u16(...) __builtin_sve_svshllt_n_u16(__VA_ARGS__)
+#define svshrnb_n_u32(...) __builtin_sve_svshrnb_n_u32(__VA_ARGS__)
+#define svshrnb_n_u64(...) __builtin_sve_svshrnb_n_u64(__VA_ARGS__)
+#define svshrnb_n_u16(...) __builtin_sve_svshrnb_n_u16(__VA_ARGS__)
+#define svshrnb_n_s32(...) __builtin_sve_svshrnb_n_s32(__VA_ARGS__)
+#define svshrnb_n_s64(...) __builtin_sve_svshrnb_n_s64(__VA_ARGS__)
+#define svshrnb_n_s16(...) __builtin_sve_svshrnb_n_s16(__VA_ARGS__)
+#define svshrnt_n_u32(...) __builtin_sve_svshrnt_n_u32(__VA_ARGS__)
+#define svshrnt_n_u64(...) __builtin_sve_svshrnt_n_u64(__VA_ARGS__)
+#define svshrnt_n_u16(...) __builtin_sve_svshrnt_n_u16(__VA_ARGS__)
+#define svshrnt_n_s32(...) __builtin_sve_svshrnt_n_s32(__VA_ARGS__)
+#define svshrnt_n_s64(...) __builtin_sve_svshrnt_n_s64(__VA_ARGS__)
+#define svshrnt_n_s16(...) __builtin_sve_svshrnt_n_s16(__VA_ARGS__)
+#define svsli_n_u8(...) __builtin_sve_svsli_n_u8(__VA_ARGS__)
+#define svsli_n_u32(...) __builtin_sve_svsli_n_u32(__VA_ARGS__)
+#define svsli_n_u64(...) __builtin_sve_svsli_n_u64(__VA_ARGS__)
+#define svsli_n_u16(...) __builtin_sve_svsli_n_u16(__VA_ARGS__)
+#define svsli_n_s8(...) __builtin_sve_svsli_n_s8(__VA_ARGS__)
+#define svsli_n_s32(...) __builtin_sve_svsli_n_s32(__VA_ARGS__)
+#define svsli_n_s64(...) __builtin_sve_svsli_n_s64(__VA_ARGS__)
+#define svsli_n_s16(...) __builtin_sve_svsli_n_s16(__VA_ARGS__)
+#define svsqadd_n_u8_m(...) __builtin_sve_svsqadd_n_u8_m(__VA_ARGS__)
+#define svsqadd_n_u32_m(...) __builtin_sve_svsqadd_n_u32_m(__VA_ARGS__)
+#define svsqadd_n_u64_m(...) __builtin_sve_svsqadd_n_u64_m(__VA_ARGS__)
+#define svsqadd_n_u16_m(...) __builtin_sve_svsqadd_n_u16_m(__VA_ARGS__)
+#define svsqadd_n_u8_x(...) __builtin_sve_svsqadd_n_u8_x(__VA_ARGS__)
+#define svsqadd_n_u32_x(...) __builtin_sve_svsqadd_n_u32_x(__VA_ARGS__)
+#define svsqadd_n_u64_x(...) __builtin_sve_svsqadd_n_u64_x(__VA_ARGS__)
+#define svsqadd_n_u16_x(...) __builtin_sve_svsqadd_n_u16_x(__VA_ARGS__)
+#define svsqadd_n_u8_z(...) __builtin_sve_svsqadd_n_u8_z(__VA_ARGS__)
+#define svsqadd_n_u32_z(...) __builtin_sve_svsqadd_n_u32_z(__VA_ARGS__)
+#define svsqadd_n_u64_z(...) __builtin_sve_svsqadd_n_u64_z(__VA_ARGS__)
+#define svsqadd_n_u16_z(...) __builtin_sve_svsqadd_n_u16_z(__VA_ARGS__)
+#define svsqadd_u8_m(...) __builtin_sve_svsqadd_u8_m(__VA_ARGS__)
+#define svsqadd_u32_m(...) __builtin_sve_svsqadd_u32_m(__VA_ARGS__)
+#define svsqadd_u64_m(...) __builtin_sve_svsqadd_u64_m(__VA_ARGS__)
+#define svsqadd_u16_m(...) __builtin_sve_svsqadd_u16_m(__VA_ARGS__)
+#define svsqadd_u8_x(...) __builtin_sve_svsqadd_u8_x(__VA_ARGS__)
+#define svsqadd_u32_x(...) __builtin_sve_svsqadd_u32_x(__VA_ARGS__)
+#define svsqadd_u64_x(...) __builtin_sve_svsqadd_u64_x(__VA_ARGS__)
+#define svsqadd_u16_x(...) __builtin_sve_svsqadd_u16_x(__VA_ARGS__)
+#define svsqadd_u8_z(...) __builtin_sve_svsqadd_u8_z(__VA_ARGS__)
+#define svsqadd_u32_z(...) __builtin_sve_svsqadd_u32_z(__VA_ARGS__)
+#define svsqadd_u64_z(...) __builtin_sve_svsqadd_u64_z(__VA_ARGS__)
+#define svsqadd_u16_z(...) __builtin_sve_svsqadd_u16_z(__VA_ARGS__)
+#define svsra_n_s8(...) __builtin_sve_svsra_n_s8(__VA_ARGS__)
+#define svsra_n_s32(...) __builtin_sve_svsra_n_s32(__VA_ARGS__)
+#define svsra_n_s64(...) __builtin_sve_svsra_n_s64(__VA_ARGS__)
+#define svsra_n_s16(...) __builtin_sve_svsra_n_s16(__VA_ARGS__)
+#define svsra_n_u8(...) __builtin_sve_svsra_n_u8(__VA_ARGS__)
+#define svsra_n_u32(...) __builtin_sve_svsra_n_u32(__VA_ARGS__)
+#define svsra_n_u64(...) __builtin_sve_svsra_n_u64(__VA_ARGS__)
+#define svsra_n_u16(...) __builtin_sve_svsra_n_u16(__VA_ARGS__)
+#define svsri_n_u8(...) __builtin_sve_svsri_n_u8(__VA_ARGS__)
+#define svsri_n_u32(...) __builtin_sve_svsri_n_u32(__VA_ARGS__)
+#define svsri_n_u64(...) __builtin_sve_svsri_n_u64(__VA_ARGS__)
+#define svsri_n_u16(...) __builtin_sve_svsri_n_u16(__VA_ARGS__)
+#define svsri_n_s8(...) __builtin_sve_svsri_n_s8(__VA_ARGS__)
+#define svsri_n_s32(...) __builtin_sve_svsri_n_s32(__VA_ARGS__)
+#define svsri_n_s64(...) __builtin_sve_svsri_n_s64(__VA_ARGS__)
+#define svsri_n_s16(...) __builtin_sve_svsri_n_s16(__VA_ARGS__)
+#define svstnt1_scatter_u32base_index_u32(...) __builtin_sve_svstnt1_scatter_u32base_index_u32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_index_u64(...) __builtin_sve_svstnt1_scatter_u64base_index_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64base_index_f64(...) __builtin_sve_svstnt1_scatter_u64base_index_f64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_index_f32(...) __builtin_sve_svstnt1_scatter_u32base_index_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32base_index_s32(...) __builtin_sve_svstnt1_scatter_u32base_index_s32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_index_s64(...) __builtin_sve_svstnt1_scatter_u64base_index_s64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_offset_u32(...) __builtin_sve_svstnt1_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64base_offset_f64(...) __builtin_sve_svstnt1_scatter_u64base_offset_f64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_offset_f32(...) __builtin_sve_svstnt1_scatter_u32base_offset_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32base_offset_s32(...) __builtin_sve_svstnt1_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_u32(...) __builtin_sve_svstnt1_scatter_u32base_u32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_u64(...) __builtin_sve_svstnt1_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64base_f64(...) __builtin_sve_svstnt1_scatter_u64base_f64(__VA_ARGS__)
+#define svstnt1_scatter_u32base_f32(...) __builtin_sve_svstnt1_scatter_u32base_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32base_s32(...) __builtin_sve_svstnt1_scatter_u32base_s32(__VA_ARGS__)
+#define svstnt1_scatter_u64base_s64(...) __builtin_sve_svstnt1_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1_scatter_s64index_u64(...) __builtin_sve_svstnt1_scatter_s64index_u64(__VA_ARGS__)
+#define svstnt1_scatter_s64index_f64(...) __builtin_sve_svstnt1_scatter_s64index_f64(__VA_ARGS__)
+#define svstnt1_scatter_s64index_s64(...) __builtin_sve_svstnt1_scatter_s64index_s64(__VA_ARGS__)
+#define svstnt1_scatter_u64index_u64(...) __builtin_sve_svstnt1_scatter_u64index_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64index_f64(...) __builtin_sve_svstnt1_scatter_u64index_f64(__VA_ARGS__)
+#define svstnt1_scatter_u64index_s64(...) __builtin_sve_svstnt1_scatter_u64index_s64(__VA_ARGS__)
+#define svstnt1_scatter_u32offset_u32(...) __builtin_sve_svstnt1_scatter_u32offset_u32(__VA_ARGS__)
+#define svstnt1_scatter_u32offset_f32(...) __builtin_sve_svstnt1_scatter_u32offset_f32(__VA_ARGS__)
+#define svstnt1_scatter_u32offset_s32(...) __builtin_sve_svstnt1_scatter_u32offset_s32(__VA_ARGS__)
+#define svstnt1_scatter_s64offset_u64(...) __builtin_sve_svstnt1_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1_scatter_s64offset_f64(...) __builtin_sve_svstnt1_scatter_s64offset_f64(__VA_ARGS__)
+#define svstnt1_scatter_s64offset_s64(...) __builtin_sve_svstnt1_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1_scatter_u64offset_u64(...) __builtin_sve_svstnt1_scatter_u64offset_u64(__VA_ARGS__)
+#define svstnt1_scatter_u64offset_f64(...) __builtin_sve_svstnt1_scatter_u64offset_f64(__VA_ARGS__)
+#define svstnt1_scatter_u64offset_s64(...) __builtin_sve_svstnt1_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_offset_u32(...) __builtin_sve_svstnt1b_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1b_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_offset_s32(...) __builtin_sve_svstnt1b_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1b_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_u32(...) __builtin_sve_svstnt1b_scatter_u32base_u32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_u64(...) __builtin_sve_svstnt1b_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1b_scatter_u32base_s32(...) __builtin_sve_svstnt1b_scatter_u32base_s32(__VA_ARGS__)
+#define svstnt1b_scatter_u64base_s64(...) __builtin_sve_svstnt1b_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u32offset_s32(...) __builtin_sve_svstnt1b_scatter_u32offset_s32(__VA_ARGS__)
+#define svstnt1b_scatter_u32offset_u32(...) __builtin_sve_svstnt1b_scatter_u32offset_u32(__VA_ARGS__)
+#define svstnt1b_scatter_s64offset_s64(...) __builtin_sve_svstnt1b_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_s64offset_u64(...) __builtin_sve_svstnt1b_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1b_scatter_u64offset_s64(...) __builtin_sve_svstnt1b_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1b_scatter_u64offset_u64(...) __builtin_sve_svstnt1b_scatter_u64offset_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_index_u32(...) __builtin_sve_svstnt1h_scatter_u32base_index_u32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_index_u64(...) __builtin_sve_svstnt1h_scatter_u64base_index_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_index_s32(...) __builtin_sve_svstnt1h_scatter_u32base_index_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_index_s64(...) __builtin_sve_svstnt1h_scatter_u64base_index_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_offset_u32(...) __builtin_sve_svstnt1h_scatter_u32base_offset_u32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1h_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_offset_s32(...) __builtin_sve_svstnt1h_scatter_u32base_offset_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1h_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_u32(...) __builtin_sve_svstnt1h_scatter_u32base_u32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_u64(...) __builtin_sve_svstnt1h_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32base_s32(...) __builtin_sve_svstnt1h_scatter_u32base_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u64base_s64(...) __builtin_sve_svstnt1h_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1h_scatter_s64index_s64(...) __builtin_sve_svstnt1h_scatter_s64index_s64(__VA_ARGS__)
+#define svstnt1h_scatter_s64index_u64(...) __builtin_sve_svstnt1h_scatter_s64index_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u64index_s64(...) __builtin_sve_svstnt1h_scatter_u64index_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u64index_u64(...) __builtin_sve_svstnt1h_scatter_u64index_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u32offset_s32(...) __builtin_sve_svstnt1h_scatter_u32offset_s32(__VA_ARGS__)
+#define svstnt1h_scatter_u32offset_u32(...) __builtin_sve_svstnt1h_scatter_u32offset_u32(__VA_ARGS__)
+#define svstnt1h_scatter_s64offset_s64(...) __builtin_sve_svstnt1h_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1h_scatter_s64offset_u64(...) __builtin_sve_svstnt1h_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1h_scatter_u64offset_s64(...) __builtin_sve_svstnt1h_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1h_scatter_u64offset_u64(...) __builtin_sve_svstnt1h_scatter_u64offset_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_index_u64(...) __builtin_sve_svstnt1w_scatter_u64base_index_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_index_s64(...) __builtin_sve_svstnt1w_scatter_u64base_index_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_offset_u64(...) __builtin_sve_svstnt1w_scatter_u64base_offset_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_offset_s64(...) __builtin_sve_svstnt1w_scatter_u64base_offset_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_u64(...) __builtin_sve_svstnt1w_scatter_u64base_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64base_s64(...) __builtin_sve_svstnt1w_scatter_u64base_s64(__VA_ARGS__)
+#define svstnt1w_scatter_s64index_s64(...) __builtin_sve_svstnt1w_scatter_s64index_s64(__VA_ARGS__)
+#define svstnt1w_scatter_s64index_u64(...) __builtin_sve_svstnt1w_scatter_s64index_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64index_s64(...) __builtin_sve_svstnt1w_scatter_u64index_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64index_u64(...) __builtin_sve_svstnt1w_scatter_u64index_u64(__VA_ARGS__)
+#define svstnt1w_scatter_s64offset_s64(...) __builtin_sve_svstnt1w_scatter_s64offset_s64(__VA_ARGS__)
+#define svstnt1w_scatter_s64offset_u64(...) __builtin_sve_svstnt1w_scatter_s64offset_u64(__VA_ARGS__)
+#define svstnt1w_scatter_u64offset_s64(...) __builtin_sve_svstnt1w_scatter_u64offset_s64(__VA_ARGS__)
+#define svstnt1w_scatter_u64offset_u64(...) __builtin_sve_svstnt1w_scatter_u64offset_u64(__VA_ARGS__)
+#define svsubhnb_n_u32(...) __builtin_sve_svsubhnb_n_u32(__VA_ARGS__)
+#define svsubhnb_n_u64(...) __builtin_sve_svsubhnb_n_u64(__VA_ARGS__)
+#define svsubhnb_n_u16(...) __builtin_sve_svsubhnb_n_u16(__VA_ARGS__)
+#define svsubhnb_n_s32(...) __builtin_sve_svsubhnb_n_s32(__VA_ARGS__)
+#define svsubhnb_n_s64(...) __builtin_sve_svsubhnb_n_s64(__VA_ARGS__)
+#define svsubhnb_n_s16(...) __builtin_sve_svsubhnb_n_s16(__VA_ARGS__)
+#define svsubhnb_u32(...) __builtin_sve_svsubhnb_u32(__VA_ARGS__)
+#define svsubhnb_u64(...) __builtin_sve_svsubhnb_u64(__VA_ARGS__)
+#define svsubhnb_u16(...) __builtin_sve_svsubhnb_u16(__VA_ARGS__)
+#define svsubhnb_s32(...) __builtin_sve_svsubhnb_s32(__VA_ARGS__)
+#define svsubhnb_s64(...) __builtin_sve_svsubhnb_s64(__VA_ARGS__)
+#define svsubhnb_s16(...) __builtin_sve_svsubhnb_s16(__VA_ARGS__)
+#define svsubhnt_n_u32(...) __builtin_sve_svsubhnt_n_u32(__VA_ARGS__)
+#define svsubhnt_n_u64(...) __builtin_sve_svsubhnt_n_u64(__VA_ARGS__)
+#define svsubhnt_n_u16(...) __builtin_sve_svsubhnt_n_u16(__VA_ARGS__)
+#define svsubhnt_n_s32(...) __builtin_sve_svsubhnt_n_s32(__VA_ARGS__)
+#define svsubhnt_n_s64(...) __builtin_sve_svsubhnt_n_s64(__VA_ARGS__)
+#define svsubhnt_n_s16(...) __builtin_sve_svsubhnt_n_s16(__VA_ARGS__)
+#define svsubhnt_u32(...) __builtin_sve_svsubhnt_u32(__VA_ARGS__)
+#define svsubhnt_u64(...) __builtin_sve_svsubhnt_u64(__VA_ARGS__)
+#define svsubhnt_u16(...) __builtin_sve_svsubhnt_u16(__VA_ARGS__)
+#define svsubhnt_s32(...) __builtin_sve_svsubhnt_s32(__VA_ARGS__)
+#define svsubhnt_s64(...) __builtin_sve_svsubhnt_s64(__VA_ARGS__)
+#define svsubhnt_s16(...) __builtin_sve_svsubhnt_s16(__VA_ARGS__)
+#define svsublb_n_s32(...) __builtin_sve_svsublb_n_s32(__VA_ARGS__)
+#define svsublb_n_s64(...) __builtin_sve_svsublb_n_s64(__VA_ARGS__)
+#define svsublb_n_s16(...) __builtin_sve_svsublb_n_s16(__VA_ARGS__)
+#define svsublb_n_u32(...) __builtin_sve_svsublb_n_u32(__VA_ARGS__)
+#define svsublb_n_u64(...) __builtin_sve_svsublb_n_u64(__VA_ARGS__)
+#define svsublb_n_u16(...) __builtin_sve_svsublb_n_u16(__VA_ARGS__)
+#define svsublb_s32(...) __builtin_sve_svsublb_s32(__VA_ARGS__)
+#define svsublb_s64(...) __builtin_sve_svsublb_s64(__VA_ARGS__)
+#define svsublb_s16(...) __builtin_sve_svsublb_s16(__VA_ARGS__)
+#define svsublb_u32(...) __builtin_sve_svsublb_u32(__VA_ARGS__)
+#define svsublb_u64(...) __builtin_sve_svsublb_u64(__VA_ARGS__)
+#define svsublb_u16(...) __builtin_sve_svsublb_u16(__VA_ARGS__)
+#define svsublbt_n_s32(...) __builtin_sve_svsublbt_n_s32(__VA_ARGS__)
+#define svsublbt_n_s64(...) __builtin_sve_svsublbt_n_s64(__VA_ARGS__)
+#define svsublbt_n_s16(...) __builtin_sve_svsublbt_n_s16(__VA_ARGS__)
+#define svsublbt_s32(...) __builtin_sve_svsublbt_s32(__VA_ARGS__)
+#define svsublbt_s64(...) __builtin_sve_svsublbt_s64(__VA_ARGS__)
+#define svsublbt_s16(...) __builtin_sve_svsublbt_s16(__VA_ARGS__)
+#define svsublt_n_s32(...) __builtin_sve_svsublt_n_s32(__VA_ARGS__)
+#define svsublt_n_s64(...) __builtin_sve_svsublt_n_s64(__VA_ARGS__)
+#define svsublt_n_s16(...) __builtin_sve_svsublt_n_s16(__VA_ARGS__)
+#define svsublt_n_u32(...) __builtin_sve_svsublt_n_u32(__VA_ARGS__)
+#define svsublt_n_u64(...) __builtin_sve_svsublt_n_u64(__VA_ARGS__)
+#define svsublt_n_u16(...) __builtin_sve_svsublt_n_u16(__VA_ARGS__)
+#define svsublt_s32(...) __builtin_sve_svsublt_s32(__VA_ARGS__)
+#define svsublt_s64(...) __builtin_sve_svsublt_s64(__VA_ARGS__)
+#define svsublt_s16(...) __builtin_sve_svsublt_s16(__VA_ARGS__)
+#define svsublt_u32(...) __builtin_sve_svsublt_u32(__VA_ARGS__)
+#define svsublt_u64(...) __builtin_sve_svsublt_u64(__VA_ARGS__)
+#define svsublt_u16(...) __builtin_sve_svsublt_u16(__VA_ARGS__)
+#define svsubltb_n_s32(...) __builtin_sve_svsubltb_n_s32(__VA_ARGS__)
+#define svsubltb_n_s64(...) __builtin_sve_svsubltb_n_s64(__VA_ARGS__)
+#define svsubltb_n_s16(...) __builtin_sve_svsubltb_n_s16(__VA_ARGS__)
+#define svsubltb_s32(...) __builtin_sve_svsubltb_s32(__VA_ARGS__)
+#define svsubltb_s64(...) __builtin_sve_svsubltb_s64(__VA_ARGS__)
+#define svsubltb_s16(...) __builtin_sve_svsubltb_s16(__VA_ARGS__)
+#define svsubwb_n_s32(...) __builtin_sve_svsubwb_n_s32(__VA_ARGS__)
+#define svsubwb_n_s64(...) __builtin_sve_svsubwb_n_s64(__VA_ARGS__)
+#define svsubwb_n_s16(...) __builtin_sve_svsubwb_n_s16(__VA_ARGS__)
+#define svsubwb_n_u32(...) __builtin_sve_svsubwb_n_u32(__VA_ARGS__)
+#define svsubwb_n_u64(...) __builtin_sve_svsubwb_n_u64(__VA_ARGS__)
+#define svsubwb_n_u16(...) __builtin_sve_svsubwb_n_u16(__VA_ARGS__)
+#define svsubwb_s32(...) __builtin_sve_svsubwb_s32(__VA_ARGS__)
+#define svsubwb_s64(...) __builtin_sve_svsubwb_s64(__VA_ARGS__)
+#define svsubwb_s16(...) __builtin_sve_svsubwb_s16(__VA_ARGS__)
+#define svsubwb_u32(...) __builtin_sve_svsubwb_u32(__VA_ARGS__)
+#define svsubwb_u64(...) __builtin_sve_svsubwb_u64(__VA_ARGS__)
+#define svsubwb_u16(...) __builtin_sve_svsubwb_u16(__VA_ARGS__)
+#define svsubwt_n_s32(...) __builtin_sve_svsubwt_n_s32(__VA_ARGS__)
+#define svsubwt_n_s64(...) __builtin_sve_svsubwt_n_s64(__VA_ARGS__)
+#define svsubwt_n_s16(...) __builtin_sve_svsubwt_n_s16(__VA_ARGS__)
+#define svsubwt_n_u32(...) __builtin_sve_svsubwt_n_u32(__VA_ARGS__)
+#define svsubwt_n_u64(...) __builtin_sve_svsubwt_n_u64(__VA_ARGS__)
+#define svsubwt_n_u16(...) __builtin_sve_svsubwt_n_u16(__VA_ARGS__)
+#define svsubwt_s32(...) __builtin_sve_svsubwt_s32(__VA_ARGS__)
+#define svsubwt_s64(...) __builtin_sve_svsubwt_s64(__VA_ARGS__)
+#define svsubwt_s16(...) __builtin_sve_svsubwt_s16(__VA_ARGS__)
+#define svsubwt_u32(...) __builtin_sve_svsubwt_u32(__VA_ARGS__)
+#define svsubwt_u64(...) __builtin_sve_svsubwt_u64(__VA_ARGS__)
+#define svsubwt_u16(...) __builtin_sve_svsubwt_u16(__VA_ARGS__)
+#define svtbl2_u8(...) __builtin_sve_svtbl2_u8(__VA_ARGS__)
+#define svtbl2_u32(...) __builtin_sve_svtbl2_u32(__VA_ARGS__)
+#define svtbl2_u64(...) __builtin_sve_svtbl2_u64(__VA_ARGS__)
+#define svtbl2_u16(...) __builtin_sve_svtbl2_u16(__VA_ARGS__)
+#define svtbl2_s8(...) __builtin_sve_svtbl2_s8(__VA_ARGS__)
+#define svtbl2_f64(...) __builtin_sve_svtbl2_f64(__VA_ARGS__)
+#define svtbl2_f32(...) __builtin_sve_svtbl2_f32(__VA_ARGS__)
+#define svtbl2_f16(...) __builtin_sve_svtbl2_f16(__VA_ARGS__)
+#define svtbl2_s32(...) __builtin_sve_svtbl2_s32(__VA_ARGS__)
+#define svtbl2_s64(...) __builtin_sve_svtbl2_s64(__VA_ARGS__)
+#define svtbl2_s16(...) __builtin_sve_svtbl2_s16(__VA_ARGS__)
+#define svtbx_u8(...) __builtin_sve_svtbx_u8(__VA_ARGS__)
+#define svtbx_u32(...) __builtin_sve_svtbx_u32(__VA_ARGS__)
+#define svtbx_u64(...) __builtin_sve_svtbx_u64(__VA_ARGS__)
+#define svtbx_u16(...) __builtin_sve_svtbx_u16(__VA_ARGS__)
+#define svtbx_s8(...) __builtin_sve_svtbx_s8(__VA_ARGS__)
+#define svtbx_f64(...) __builtin_sve_svtbx_f64(__VA_ARGS__)
+#define svtbx_f32(...) __builtin_sve_svtbx_f32(__VA_ARGS__)
+#define svtbx_f16(...) __builtin_sve_svtbx_f16(__VA_ARGS__)
+#define svtbx_s32(...) __builtin_sve_svtbx_s32(__VA_ARGS__)
+#define svtbx_s64(...) __builtin_sve_svtbx_s64(__VA_ARGS__)
+#define svtbx_s16(...) __builtin_sve_svtbx_s16(__VA_ARGS__)
+#define svuqadd_n_s8_m(...) __builtin_sve_svuqadd_n_s8_m(__VA_ARGS__)
+#define svuqadd_n_s32_m(...) __builtin_sve_svuqadd_n_s32_m(__VA_ARGS__)
+#define svuqadd_n_s64_m(...) __builtin_sve_svuqadd_n_s64_m(__VA_ARGS__)
+#define svuqadd_n_s16_m(...) __builtin_sve_svuqadd_n_s16_m(__VA_ARGS__)
+#define svuqadd_n_s8_x(...) __builtin_sve_svuqadd_n_s8_x(__VA_ARGS__)
+#define svuqadd_n_s32_x(...) __builtin_sve_svuqadd_n_s32_x(__VA_ARGS__)
+#define svuqadd_n_s64_x(...) __builtin_sve_svuqadd_n_s64_x(__VA_ARGS__)
+#define svuqadd_n_s16_x(...) __builtin_sve_svuqadd_n_s16_x(__VA_ARGS__)
+#define svuqadd_n_s8_z(...) __builtin_sve_svuqadd_n_s8_z(__VA_ARGS__)
+#define svuqadd_n_s32_z(...) __builtin_sve_svuqadd_n_s32_z(__VA_ARGS__)
+#define svuqadd_n_s64_z(...) __builtin_sve_svuqadd_n_s64_z(__VA_ARGS__)
+#define svuqadd_n_s16_z(...) __builtin_sve_svuqadd_n_s16_z(__VA_ARGS__)
+#define svuqadd_s8_m(...) __builtin_sve_svuqadd_s8_m(__VA_ARGS__)
+#define svuqadd_s32_m(...) __builtin_sve_svuqadd_s32_m(__VA_ARGS__)
+#define svuqadd_s64_m(...) __builtin_sve_svuqadd_s64_m(__VA_ARGS__)
+#define svuqadd_s16_m(...) __builtin_sve_svuqadd_s16_m(__VA_ARGS__)
+#define svuqadd_s8_x(...) __builtin_sve_svuqadd_s8_x(__VA_ARGS__)
+#define svuqadd_s32_x(...) __builtin_sve_svuqadd_s32_x(__VA_ARGS__)
+#define svuqadd_s64_x(...) __builtin_sve_svuqadd_s64_x(__VA_ARGS__)
+#define svuqadd_s16_x(...) __builtin_sve_svuqadd_s16_x(__VA_ARGS__)
+#define svuqadd_s8_z(...) __builtin_sve_svuqadd_s8_z(__VA_ARGS__)
+#define svuqadd_s32_z(...) __builtin_sve_svuqadd_s32_z(__VA_ARGS__)
+#define svuqadd_s64_z(...) __builtin_sve_svuqadd_s64_z(__VA_ARGS__)
+#define svuqadd_s16_z(...) __builtin_sve_svuqadd_s16_z(__VA_ARGS__)
+#define svwhilege_b8_s32(...) __builtin_sve_svwhilege_b8_s32(__VA_ARGS__)
+#define svwhilege_b32_s32(...) __builtin_sve_svwhilege_b32_s32(__VA_ARGS__)
+#define svwhilege_b64_s32(...) __builtin_sve_svwhilege_b64_s32(__VA_ARGS__)
+#define svwhilege_b16_s32(...) __builtin_sve_svwhilege_b16_s32(__VA_ARGS__)
+#define svwhilege_b8_s64(...) __builtin_sve_svwhilege_b8_s64(__VA_ARGS__)
+#define svwhilege_b32_s64(...) __builtin_sve_svwhilege_b32_s64(__VA_ARGS__)
+#define svwhilege_b64_s64(...) __builtin_sve_svwhilege_b64_s64(__VA_ARGS__)
+#define svwhilege_b16_s64(...) __builtin_sve_svwhilege_b16_s64(__VA_ARGS__)
+#define svwhilege_b8_u32(...) __builtin_sve_svwhilege_b8_u32(__VA_ARGS__)
+#define svwhilege_b32_u32(...) __builtin_sve_svwhilege_b32_u32(__VA_ARGS__)
+#define svwhilege_b64_u32(...) __builtin_sve_svwhilege_b64_u32(__VA_ARGS__)
+#define svwhilege_b16_u32(...) __builtin_sve_svwhilege_b16_u32(__VA_ARGS__)
+#define svwhilege_b8_u64(...) __builtin_sve_svwhilege_b8_u64(__VA_ARGS__)
+#define svwhilege_b32_u64(...) __builtin_sve_svwhilege_b32_u64(__VA_ARGS__)
+#define svwhilege_b64_u64(...) __builtin_sve_svwhilege_b64_u64(__VA_ARGS__)
+#define svwhilege_b16_u64(...) __builtin_sve_svwhilege_b16_u64(__VA_ARGS__)
+#define svwhilegt_b8_s32(...) __builtin_sve_svwhilegt_b8_s32(__VA_ARGS__)
+#define svwhilegt_b32_s32(...) __builtin_sve_svwhilegt_b32_s32(__VA_ARGS__)
+#define svwhilegt_b64_s32(...) __builtin_sve_svwhilegt_b64_s32(__VA_ARGS__)
+#define svwhilegt_b16_s32(...) __builtin_sve_svwhilegt_b16_s32(__VA_ARGS__)
+#define svwhilegt_b8_s64(...) __builtin_sve_svwhilegt_b8_s64(__VA_ARGS__)
+#define svwhilegt_b32_s64(...) __builtin_sve_svwhilegt_b32_s64(__VA_ARGS__)
+#define svwhilegt_b64_s64(...) __builtin_sve_svwhilegt_b64_s64(__VA_ARGS__)
+#define svwhilegt_b16_s64(...) __builtin_sve_svwhilegt_b16_s64(__VA_ARGS__)
+#define svwhilegt_b8_u32(...) __builtin_sve_svwhilegt_b8_u32(__VA_ARGS__)
+#define svwhilegt_b32_u32(...) __builtin_sve_svwhilegt_b32_u32(__VA_ARGS__)
+#define svwhilegt_b64_u32(...) __builtin_sve_svwhilegt_b64_u32(__VA_ARGS__)
+#define svwhilegt_b16_u32(...) __builtin_sve_svwhilegt_b16_u32(__VA_ARGS__)
+#define svwhilegt_b8_u64(...) __builtin_sve_svwhilegt_b8_u64(__VA_ARGS__)
+#define svwhilegt_b32_u64(...) __builtin_sve_svwhilegt_b32_u64(__VA_ARGS__)
+#define svwhilegt_b64_u64(...) __builtin_sve_svwhilegt_b64_u64(__VA_ARGS__)
+#define svwhilegt_b16_u64(...) __builtin_sve_svwhilegt_b16_u64(__VA_ARGS__)
+#define svwhilerw_u8(...) __builtin_sve_svwhilerw_u8(__VA_ARGS__)
+#define svwhilerw_s8(...) __builtin_sve_svwhilerw_s8(__VA_ARGS__)
+#define svwhilerw_u64(...) __builtin_sve_svwhilerw_u64(__VA_ARGS__)
+#define svwhilerw_f64(...) __builtin_sve_svwhilerw_f64(__VA_ARGS__)
+#define svwhilerw_s64(...) __builtin_sve_svwhilerw_s64(__VA_ARGS__)
+#define svwhilerw_u16(...) __builtin_sve_svwhilerw_u16(__VA_ARGS__)
+#define svwhilerw_f16(...) __builtin_sve_svwhilerw_f16(__VA_ARGS__)
+#define svwhilerw_s16(...) __builtin_sve_svwhilerw_s16(__VA_ARGS__)
+#define svwhilerw_u32(...) __builtin_sve_svwhilerw_u32(__VA_ARGS__)
+#define svwhilerw_f32(...) __builtin_sve_svwhilerw_f32(__VA_ARGS__)
+#define svwhilerw_s32(...) __builtin_sve_svwhilerw_s32(__VA_ARGS__)
+#define svwhilewr_u8(...) __builtin_sve_svwhilewr_u8(__VA_ARGS__)
+#define svwhilewr_s8(...) __builtin_sve_svwhilewr_s8(__VA_ARGS__)
+#define svwhilewr_u64(...) __builtin_sve_svwhilewr_u64(__VA_ARGS__)
+#define svwhilewr_f64(...) __builtin_sve_svwhilewr_f64(__VA_ARGS__)
+#define svwhilewr_s64(...) __builtin_sve_svwhilewr_s64(__VA_ARGS__)
+#define svwhilewr_u16(...) __builtin_sve_svwhilewr_u16(__VA_ARGS__)
+#define svwhilewr_f16(...) __builtin_sve_svwhilewr_f16(__VA_ARGS__)
+#define svwhilewr_s16(...) __builtin_sve_svwhilewr_s16(__VA_ARGS__)
+#define svwhilewr_u32(...) __builtin_sve_svwhilewr_u32(__VA_ARGS__)
+#define svwhilewr_f32(...) __builtin_sve_svwhilewr_f32(__VA_ARGS__)
+#define svwhilewr_s32(...) __builtin_sve_svwhilewr_s32(__VA_ARGS__)
+#define svxar_n_u8(...) __builtin_sve_svxar_n_u8(__VA_ARGS__)
+#define svxar_n_u32(...) __builtin_sve_svxar_n_u32(__VA_ARGS__)
+#define svxar_n_u64(...) __builtin_sve_svxar_n_u64(__VA_ARGS__)
+#define svxar_n_u16(...) __builtin_sve_svxar_n_u16(__VA_ARGS__)
+#define svxar_n_s8(...) __builtin_sve_svxar_n_s8(__VA_ARGS__)
+#define svxar_n_s32(...) __builtin_sve_svxar_n_s32(__VA_ARGS__)
+#define svxar_n_s64(...) __builtin_sve_svxar_n_s64(__VA_ARGS__)
+#define svxar_n_s16(...) __builtin_sve_svxar_n_s16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8)))
+svint8_t svaba(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32)))
+svint32_t svaba(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64)))
+svint64_t svaba(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16)))
+svint16_t svaba(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8)))
+svuint8_t svaba(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32)))
+svuint32_t svaba(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64)))
+svuint64_t svaba(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16)))
+svuint16_t svaba(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8)))
+svint8_t svaba(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32)))
+svint32_t svaba(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64)))
+svint64_t svaba(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16)))
+svint16_t svaba(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8)))
+svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32)))
+svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64)))
+svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16)))
+svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32)))
+svint32_t svabalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64)))
+svint64_t svabalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16)))
+svint16_t svabalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32)))
+svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64)))
+svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16)))
+svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32)))
+svint32_t svabalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64)))
+svint64_t svabalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16)))
+svint16_t svabalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32)))
+svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64)))
+svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16)))
+svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32)))
+svint32_t svabalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64)))
+svint64_t svabalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16)))
+svint16_t svabalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32)))
+svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64)))
+svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16)))
+svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32)))
+svint32_t svabalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64)))
+svint64_t svabalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16)))
+svint16_t svabalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32)))
+svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64)))
+svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16)))
+svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32)))
+svint32_t svabdlb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64)))
+svint64_t svabdlb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16)))
+svint16_t svabdlb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32)))
+svuint32_t svabdlb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64)))
+svuint64_t svabdlb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16)))
+svuint16_t svabdlb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32)))
+svint32_t svabdlb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64)))
+svint64_t svabdlb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16)))
+svint16_t svabdlb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32)))
+svuint32_t svabdlb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64)))
+svuint64_t svabdlb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16)))
+svuint16_t svabdlb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32)))
+svint32_t svabdlt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64)))
+svint64_t svabdlt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16)))
+svint16_t svabdlt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32)))
+svuint32_t svabdlt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64)))
+svuint64_t svabdlt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16)))
+svuint16_t svabdlt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32)))
+svint32_t svabdlt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64)))
+svint64_t svabdlt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16)))
+svint16_t svabdlt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32)))
+svuint32_t svabdlt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64)))
+svuint64_t svabdlt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16)))
+svuint16_t svabdlt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m)))
+svint32_t svadalp_m(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m)))
+svint64_t svadalp_m(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m)))
+svint16_t svadalp_m(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x)))
+svint32_t svadalp_x(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x)))
+svint64_t svadalp_x(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x)))
+svint16_t svadalp_x(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z)))
+svint32_t svadalp_z(svbool_t, svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z)))
+svint64_t svadalp_z(svbool_t, svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z)))
+svint16_t svadalp_z(svbool_t, svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m)))
+svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m)))
+svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m)))
+svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x)))
+svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x)))
+svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x)))
+svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z)))
+svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z)))
+svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z)))
+svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32)))
+svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64)))
+svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32)))
+svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64)))
+svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32)))
+svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64)))
+svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32)))
+svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64)))
+svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32)))
+svuint16_t svaddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64)))
+svuint32_t svaddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16)))
+svuint8_t svaddhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32)))
+svint16_t svaddhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64)))
+svint32_t svaddhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16)))
+svint8_t svaddhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32)))
+svuint16_t svaddhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64)))
+svuint32_t svaddhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16)))
+svuint8_t svaddhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32)))
+svint16_t svaddhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64)))
+svint32_t svaddhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16)))
+svint8_t svaddhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32)))
+svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64)))
+svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16)))
+svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32)))
+svint16_t svaddhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64)))
+svint32_t svaddhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16)))
+svint8_t svaddhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32)))
+svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64)))
+svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16)))
+svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32)))
+svint16_t svaddhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64)))
+svint32_t svaddhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16)))
+svint8_t svaddhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32)))
+svint32_t svaddlb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64)))
+svint64_t svaddlb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16)))
+svint16_t svaddlb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32)))
+svuint32_t svaddlb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64)))
+svuint64_t svaddlb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16)))
+svuint16_t svaddlb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32)))
+svint32_t svaddlb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64)))
+svint64_t svaddlb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16)))
+svint16_t svaddlb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32)))
+svuint32_t svaddlb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64)))
+svuint64_t svaddlb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16)))
+svuint16_t svaddlb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32)))
+svint32_t svaddlbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64)))
+svint64_t svaddlbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16)))
+svint16_t svaddlbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32)))
+svint32_t svaddlbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64)))
+svint64_t svaddlbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16)))
+svint16_t svaddlbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32)))
+svint32_t svaddlt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64)))
+svint64_t svaddlt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16)))
+svint16_t svaddlt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32)))
+svuint32_t svaddlt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64)))
+svuint64_t svaddlt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16)))
+svuint16_t svaddlt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32)))
+svint32_t svaddlt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64)))
+svint64_t svaddlt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16)))
+svint16_t svaddlt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32)))
+svuint32_t svaddlt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64)))
+svuint64_t svaddlt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16)))
+svuint16_t svaddlt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m)))
+svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m)))
+svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m)))
+svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x)))
+svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x)))
+svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x)))
+svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m)))
+svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m)))
+svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m)))
+svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m)))
+svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m)))
+svint8_t svaddp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m)))
+svint32_t svaddp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m)))
+svint64_t svaddp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m)))
+svint16_t svaddp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x)))
+svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x)))
+svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x)))
+svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x)))
+svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x)))
+svint8_t svaddp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x)))
+svint32_t svaddp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x)))
+svint64_t svaddp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x)))
+svint16_t svaddp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32)))
+svint32_t svaddwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64)))
+svint64_t svaddwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16)))
+svint16_t svaddwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32)))
+svuint32_t svaddwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64)))
+svuint64_t svaddwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16)))
+svuint16_t svaddwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32)))
+svint32_t svaddwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64)))
+svint64_t svaddwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16)))
+svint16_t svaddwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32)))
+svuint32_t svaddwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64)))
+svuint64_t svaddwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16)))
+svuint16_t svaddwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32)))
+svint32_t svaddwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64)))
+svint64_t svaddwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16)))
+svint16_t svaddwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32)))
+svuint32_t svaddwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64)))
+svuint64_t svaddwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16)))
+svuint16_t svaddwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32)))
+svint32_t svaddwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64)))
+svint64_t svaddwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16)))
+svint16_t svaddwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32)))
+svuint32_t svaddwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64)))
+svuint64_t svaddwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16)))
+svuint16_t svaddwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8)))
+svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32)))
+svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64)))
+svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16)))
+svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8)))
+svint8_t svbcax(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32)))
+svint32_t svbcax(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64)))
+svint64_t svbcax(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16)))
+svint16_t svbcax(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8)))
+svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32)))
+svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64)))
+svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16)))
+svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8)))
+svint8_t svbcax(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32)))
+svint32_t svbcax(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64)))
+svint64_t svbcax(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16)))
+svint16_t svbcax(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8)))
+svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32)))
+svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64)))
+svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16)))
+svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8)))
+svint8_t svbsl1n(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32)))
+svint32_t svbsl1n(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64)))
+svint64_t svbsl1n(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16)))
+svint16_t svbsl1n(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8)))
+svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32)))
+svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64)))
+svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16)))
+svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8)))
+svint8_t svbsl1n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32)))
+svint32_t svbsl1n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64)))
+svint64_t svbsl1n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16)))
+svint16_t svbsl1n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8)))
+svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32)))
+svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64)))
+svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16)))
+svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8)))
+svint8_t svbsl2n(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32)))
+svint32_t svbsl2n(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64)))
+svint64_t svbsl2n(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16)))
+svint16_t svbsl2n(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8)))
+svint8_t svbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32)))
+svint32_t svbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64)))
+svint64_t svbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16)))
+svint16_t svbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8)))
+svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32)))
+svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64)))
+svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16)))
+svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8)))
+svint8_t svbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32)))
+svint32_t svbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64)))
+svint64_t svbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16)))
+svint16_t svbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8)))
+svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32)))
+svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64)))
+svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16)))
+svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8)))
+svint8_t svcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32)))
+svint32_t svcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64)))
+svint64_t svcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16)))
+svint16_t svcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32)))
+svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64)))
+svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32)))
+svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64)))
+svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8)))
+svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32)))
+svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64)))
+svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16)))
+svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8)))
+svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32)))
+svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64)))
+svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16)))
+svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32)))
+svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16)))
+svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32)))
+svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16)))
+svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m)))
+svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x)))
+svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m)))
+svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x)))
+svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m)))
+svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m)))
+svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m)))
+svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x)))
+svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z)))
+svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m)))
+svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8)))
+svint8_t sveor3(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32)))
+svint32_t sveor3(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64)))
+svint64_t sveor3(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16)))
+svint16_t sveor3(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8)))
+svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32)))
+svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64)))
+svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16)))
+svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8)))
+svint8_t sveor3(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32)))
+svint32_t sveor3(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64)))
+svint64_t sveor3(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16)))
+svint16_t sveor3(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8)))
+svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32)))
+svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64)))
+svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16)))
+svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8)))
+svint8_t sveorbt(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32)))
+svint32_t sveorbt(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64)))
+svint64_t sveorbt(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16)))
+svint16_t sveorbt(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8)))
+svint8_t sveortb(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32)))
+svint32_t sveortb(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64)))
+svint64_t sveortb(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16)))
+svint16_t sveortb(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8)))
+svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32)))
+svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64)))
+svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16)))
+svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8)))
+svint8_t sveortb(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32)))
+svint32_t sveortb(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64)))
+svint64_t sveortb(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16)))
+svint16_t sveortb(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m)))
+svint8_t svhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m)))
+svint32_t svhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m)))
+svint64_t svhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m)))
+svint16_t svhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x)))
+svint8_t svhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x)))
+svint32_t svhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x)))
+svint64_t svhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x)))
+svint16_t svhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z)))
+svint8_t svhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z)))
+svint32_t svhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z)))
+svint64_t svhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z)))
+svint16_t svhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m)))
+svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m)))
+svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m)))
+svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m)))
+svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x)))
+svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x)))
+svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x)))
+svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x)))
+svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z)))
+svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z)))
+svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z)))
+svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z)))
+svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z)))
+svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z)))
+svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z)))
+svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z)))
+svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8)))
+svuint8_t svhistseg(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8)))
+svuint8_t svhistseg(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x)))
+svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z)))
+svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z)))
+svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z)))
+svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z)))
+svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m)))
+svint8_t svhsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m)))
+svint32_t svhsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m)))
+svint64_t svhsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m)))
+svint16_t svhsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x)))
+svint8_t svhsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x)))
+svint32_t svhsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x)))
+svint64_t svhsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x)))
+svint16_t svhsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z)))
+svint8_t svhsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z)))
+svint32_t svhsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z)))
+svint64_t svhsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z)))
+svint16_t svhsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m)))
+svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m)))
+svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m)))
+svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m)))
+svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x)))
+svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x)))
+svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x)))
+svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x)))
+svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z)))
+svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z)))
+svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z)))
+svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z)))
+svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m)))
+svint8_t svhsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m)))
+svint32_t svhsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m)))
+svint64_t svhsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m)))
+svint16_t svhsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x)))
+svint8_t svhsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x)))
+svint32_t svhsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x)))
+svint64_t svhsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x)))
+svint16_t svhsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z)))
+svint8_t svhsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z)))
+svint32_t svhsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z)))
+svint64_t svhsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z)))
+svint16_t svhsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m)))
+svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m)))
+svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m)))
+svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m)))
+svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x)))
+svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x)))
+svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x)))
+svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x)))
+svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z)))
+svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z)))
+svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z)))
+svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z)))
+svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m)))
+svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m)))
+svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m)))
+svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m)))
+svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x)))
+svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x)))
+svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x)))
+svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x)))
+svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z)))
+svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z)))
+svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z)))
+svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z)))
+svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m)))
+svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m)))
+svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m)))
+svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m)))
+svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x)))
+svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x)))
+svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x)))
+svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x)))
+svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z)))
+svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z)))
+svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z)))
+svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z)))
+svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32)))
+svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64)))
+svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64)))
+svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32)))
+svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32)))
+svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64)))
+svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32)))
+svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64)))
+svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64)))
+svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32)))
+svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32)))
+svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64)))
+svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32)))
+svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64)))
+svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64)))
+svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32)))
+svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32)))
+svint32_t svldnt1_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64)))
+svint64_t svldnt1_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64)))
+svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64)))
+svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64)))
+svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64)))
+svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64)))
+svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64)))
+svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32)))
+svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32)))
+svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32)))
+svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64)))
+svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64)))
+svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64)))
+svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64)))
+svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64)))
+svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64)))
+svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32)))
+svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32)))
+svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32)))
+svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64)))
+svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32)))
+svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64)))
+svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32)))
+svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32)))
+svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64)))
+svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64)))
+svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32)))
+svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32)))
+svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32)))
+svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32)))
+svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32)))
+svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64)))
+svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32)))
+svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64)))
+svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64)))
+svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64)))
+svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32)))
+svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32)))
+svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64)))
+svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64)))
+svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64)))
+svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64)))
+svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64)))
+svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64)))
+svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64)))
+svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64)))
+svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64)))
+svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64)))
+svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64)))
+svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64)))
+svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64)))
+svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64)))
+svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64)))
+svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64)))
+svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32)))
+svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64)))
+svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32)))
+svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64)))
+svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32)))
+svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64)))
+svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32)))
+svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64)))
+svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32)))
+svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32)))
+svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64)))
+svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64)))
+svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64)))
+svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64)))
+svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32)))
+svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64)))
+svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32)))
+svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64)))
+svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32)))
+svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64)))
+svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32)))
+svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64)))
+svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32)))
+svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64)))
+svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32)))
+svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64)))
+svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64)))
+svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64)))
+svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64)))
+svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64)))
+svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32)))
+svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32)))
+svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64)))
+svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64)))
+svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64)))
+svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64)))
+svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64)))
+svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64)))
+svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64)))
+svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64)))
+svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64)))
+svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64)))
+svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64)))
+svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64)))
+svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64)))
+svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64)))
+svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64)))
+svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64)))
+svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64)))
+svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64)))
+svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m)))
+svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m)))
+svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m)))
+svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x)))
+svint64_t svlogb_x(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x)))
+svint32_t svlogb_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x)))
+svint16_t svlogb_x(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z)))
+svint64_t svlogb_z(svbool_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z)))
+svint32_t svlogb_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z)))
+svint16_t svlogb_z(svbool_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8)))
+svbool_t svmatch(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16)))
+svbool_t svmatch(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8)))
+svbool_t svmatch(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16)))
+svbool_t svmatch(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m)))
+svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m)))
+svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m)))
+svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x)))
+svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x)))
+svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x)))
+svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m)))
+svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m)))
+svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m)))
+svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x)))
+svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x)))
+svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x)))
+svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m)))
+svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m)))
+svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m)))
+svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m)))
+svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x)))
+svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x)))
+svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x)))
+svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x)))
+svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m)))
+svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m)))
+svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m)))
+svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m)))
+svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x)))
+svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x)))
+svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x)))
+svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x)))
+svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m)))
+svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m)))
+svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m)))
+svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x)))
+svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x)))
+svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x)))
+svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m)))
+svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m)))
+svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m)))
+svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x)))
+svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x)))
+svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x)))
+svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m)))
+svint8_t svminp_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m)))
+svint32_t svminp_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m)))
+svint64_t svminp_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m)))
+svint16_t svminp_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x)))
+svint8_t svminp_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x)))
+svint32_t svminp_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x)))
+svint64_t svminp_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x)))
+svint16_t svminp_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m)))
+svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m)))
+svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m)))
+svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m)))
+svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x)))
+svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x)))
+svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x)))
+svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x)))
+svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32)))
+svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64)))
+svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16)))
+svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32)))
+svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64)))
+svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16)))
+svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32)))
+svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32)))
+svint32_t svmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64)))
+svint64_t svmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16)))
+svint16_t svmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32)))
+svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64)))
+svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16)))
+svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32)))
+svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32)))
+svint32_t svmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64)))
+svint64_t svmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16)))
+svint16_t svmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32)))
+svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64)))
+svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16)))
+svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32)))
+svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32)))
+svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64)))
+svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32)))
+svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64)))
+svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32)))
+svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32)))
+svint32_t svmlalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64)))
+svint64_t svmlalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16)))
+svint16_t svmlalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32)))
+svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64)))
+svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16)))
+svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32)))
+svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32)))
+svint32_t svmlalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64)))
+svint64_t svmlalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16)))
+svint16_t svmlalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32)))
+svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64)))
+svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16)))
+svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32)))
+svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32)))
+svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64)))
+svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32)))
+svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64)))
+svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32)))
+svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64)))
+svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16)))
+svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32)))
+svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64)))
+svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16)))
+svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32)))
+svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32)))
+svint32_t svmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64)))
+svint64_t svmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16)))
+svint16_t svmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32)))
+svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64)))
+svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16)))
+svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32)))
+svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32)))
+svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64)))
+svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32)))
+svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64)))
+svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32)))
+svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32)))
+svint32_t svmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64)))
+svint64_t svmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16)))
+svint16_t svmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32)))
+svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64)))
+svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16)))
+svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32)))
+svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32)))
+svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64)))
+svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32)))
+svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64)))
+svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32)))
+svint32_t svmovlb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64)))
+svint64_t svmovlb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16)))
+svint16_t svmovlb(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32)))
+svuint32_t svmovlb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64)))
+svuint64_t svmovlb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16)))
+svuint16_t svmovlb(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32)))
+svint32_t svmovlt(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64)))
+svint64_t svmovlt(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16)))
+svint16_t svmovlt(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32)))
+svuint32_t svmovlt(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64)))
+svuint64_t svmovlt(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16)))
+svuint16_t svmovlt(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32)))
+svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64)))
+svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16)))
+svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32)))
+svint32_t svmul_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64)))
+svint64_t svmul_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16)))
+svint16_t svmul_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32)))
+svint32_t svmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64)))
+svint64_t svmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16)))
+svint16_t svmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32)))
+svuint32_t svmullb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64)))
+svuint64_t svmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16)))
+svuint16_t svmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32)))
+svint32_t svmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64)))
+svint64_t svmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16)))
+svint16_t svmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32)))
+svuint32_t svmullb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64)))
+svuint64_t svmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16)))
+svuint16_t svmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32)))
+svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64)))
+svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32)))
+svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64)))
+svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32)))
+svint32_t svmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64)))
+svint64_t svmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16)))
+svint16_t svmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32)))
+svuint32_t svmullt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64)))
+svuint64_t svmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16)))
+svuint16_t svmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32)))
+svint32_t svmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64)))
+svint64_t svmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16)))
+svint16_t svmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32)))
+svuint32_t svmullt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64)))
+svuint64_t svmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16)))
+svuint16_t svmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32)))
+svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64)))
+svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32)))
+svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64)))
+svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8)))
+svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32)))
+svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64)))
+svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16)))
+svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8)))
+svint8_t svnbsl(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32)))
+svint32_t svnbsl(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64)))
+svint64_t svnbsl(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16)))
+svint16_t svnbsl(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8)))
+svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16)))
+svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8)))
+svbool_t svnmatch(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16)))
+svbool_t svnmatch(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8)))
+svuint8_t svpmul(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8)))
+svuint8_t svpmul(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64)))
+svuint64_t svpmullb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16)))
+svuint16_t svpmullb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64)))
+svuint64_t svpmullb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16)))
+svuint16_t svpmullb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8)))
+svuint8_t svpmullb_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32)))
+svuint32_t svpmullb_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8)))
+svuint8_t svpmullb_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32)))
+svuint32_t svpmullb_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64)))
+svuint64_t svpmullt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16)))
+svuint16_t svpmullt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64)))
+svuint64_t svpmullt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16)))
+svuint16_t svpmullt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8)))
+svuint8_t svpmullt_pair(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32)))
+svuint32_t svpmullt_pair(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8)))
+svuint8_t svpmullt_pair(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32)))
+svuint32_t svpmullt_pair(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m)))
+svint8_t svqabs_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m)))
+svint32_t svqabs_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m)))
+svint64_t svqabs_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m)))
+svint16_t svqabs_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x)))
+svint8_t svqabs_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x)))
+svint32_t svqabs_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x)))
+svint64_t svqabs_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x)))
+svint16_t svqabs_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z)))
+svint8_t svqabs_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z)))
+svint32_t svqabs_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z)))
+svint64_t svqabs_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z)))
+svint16_t svqabs_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m)))
+svint8_t svqadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m)))
+svint32_t svqadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m)))
+svint64_t svqadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m)))
+svint16_t svqadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x)))
+svint8_t svqadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x)))
+svint32_t svqadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x)))
+svint64_t svqadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x)))
+svint16_t svqadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z)))
+svint8_t svqadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z)))
+svint32_t svqadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z)))
+svint64_t svqadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z)))
+svint16_t svqadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m)))
+svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m)))
+svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m)))
+svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m)))
+svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x)))
+svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x)))
+svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x)))
+svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x)))
+svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z)))
+svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z)))
+svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z)))
+svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z)))
+svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8)))
+svint8_t svqcadd(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32)))
+svint32_t svqcadd(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64)))
+svint64_t svqcadd(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16)))
+svint16_t svqcadd(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32)))
+svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64)))
+svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16)))
+svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32)))
+svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64)))
+svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16)))
+svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32)))
+svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64)))
+svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16)))
+svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32)))
+svint32_t svqdmlalt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64)))
+svint64_t svqdmlalt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16)))
+svint16_t svqdmlalt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32)))
+svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64)))
+svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16)))
+svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32)))
+svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64)))
+svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32)))
+svint32_t svqdmlslb(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64)))
+svint64_t svqdmlslb(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16)))
+svint16_t svqdmlslb(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32)))
+svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64)))
+svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16)))
+svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32)))
+svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64)))
+svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32)))
+svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64)))
+svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16)))
+svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32)))
+svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64)))
+svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16)))
+svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32)))
+svint32_t svqdmlslt(svint32_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64)))
+svint64_t svqdmlslt(svint64_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16)))
+svint16_t svqdmlslt(svint16_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32)))
+svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64)))
+svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16)))
+svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32)))
+svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64)))
+svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8)))
+svint8_t svqdmulh(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32)))
+svint32_t svqdmulh(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64)))
+svint64_t svqdmulh(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16)))
+svint16_t svqdmulh(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8)))
+svint8_t svqdmulh(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32)))
+svint32_t svqdmulh(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64)))
+svint64_t svqdmulh(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16)))
+svint16_t svqdmulh(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32)))
+svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64)))
+svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16)))
+svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32)))
+svint32_t svqdmullb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64)))
+svint64_t svqdmullb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16)))
+svint16_t svqdmullb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32)))
+svint32_t svqdmullb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64)))
+svint64_t svqdmullb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16)))
+svint16_t svqdmullb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32)))
+svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64)))
+svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32)))
+svint32_t svqdmullt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64)))
+svint64_t svqdmullt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16)))
+svint16_t svqdmullt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32)))
+svint32_t svqdmullt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64)))
+svint64_t svqdmullt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16)))
+svint16_t svqdmullt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32)))
+svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64)))
+svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m)))
+svint8_t svqneg_m(svint8_t, svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m)))
+svint32_t svqneg_m(svint32_t, svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m)))
+svint64_t svqneg_m(svint64_t, svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m)))
+svint16_t svqneg_m(svint16_t, svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x)))
+svint8_t svqneg_x(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x)))
+svint32_t svqneg_x(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x)))
+svint64_t svqneg_x(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x)))
+svint16_t svqneg_x(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z)))
+svint8_t svqneg_z(svbool_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z)))
+svint32_t svqneg_z(svbool_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z)))
+svint64_t svqneg_z(svbool_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z)))
+svint16_t svqneg_z(svbool_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8)))
+svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32)))
+svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64)))
+svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16)))
+svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32)))
+svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16)))
+svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8)))
+svint8_t svqrdmlah(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32)))
+svint32_t svqrdmlah(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64)))
+svint64_t svqrdmlah(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16)))
+svint16_t svqrdmlah(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8)))
+svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32)))
+svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64)))
+svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16)))
+svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32)))
+svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64)))
+svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16)))
+svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8)))
+svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32)))
+svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64)))
+svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16)))
+svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8)))
+svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32)))
+svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64)))
+svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16)))
+svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32)))
+svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64)))
+svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16)))
+svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8)))
+svint8_t svqrdmulh(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32)))
+svint32_t svqrdmulh(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64)))
+svint64_t svqrdmulh(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16)))
+svint16_t svqrdmulh(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8)))
+svint8_t svqrdmulh(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32)))
+svint32_t svqrdmulh(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64)))
+svint64_t svqrdmulh(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16)))
+svint16_t svqrdmulh(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32)))
+svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64)))
+svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16)))
+svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m)))
+svint8_t svqrshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m)))
+svint32_t svqrshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m)))
+svint64_t svqrshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m)))
+svint16_t svqrshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x)))
+svint8_t svqrshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x)))
+svint32_t svqrshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x)))
+svint64_t svqrshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x)))
+svint16_t svqrshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z)))
+svint8_t svqrshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z)))
+svint32_t svqrshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z)))
+svint64_t svqrshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z)))
+svint16_t svqrshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m)))
+svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m)))
+svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m)))
+svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m)))
+svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x)))
+svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x)))
+svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x)))
+svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x)))
+svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z)))
+svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z)))
+svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z)))
+svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z)))
+svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m)))
+svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m)))
+svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m)))
+svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m)))
+svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x)))
+svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x)))
+svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x)))
+svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x)))
+svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z)))
+svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z)))
+svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z)))
+svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z)))
+svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m)))
+svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m)))
+svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m)))
+svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m)))
+svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x)))
+svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x)))
+svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x)))
+svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x)))
+svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z)))
+svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z)))
+svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z)))
+svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z)))
+svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32)))
+svint16_t svqrshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64)))
+svint32_t svqrshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16)))
+svint8_t svqrshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32)))
+svuint16_t svqrshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64)))
+svuint32_t svqrshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16)))
+svuint8_t svqrshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32)))
+svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64)))
+svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16)))
+svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32)))
+svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64)))
+svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16)))
+svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32)))
+svuint16_t svqrshrunb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64)))
+svuint32_t svqrshrunb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16)))
+svuint8_t svqrshrunb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32)))
+svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64)))
+svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16)))
+svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m)))
+svint8_t svqshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m)))
+svint32_t svqshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m)))
+svint64_t svqshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m)))
+svint16_t svqshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x)))
+svint8_t svqshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x)))
+svint32_t svqshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x)))
+svint64_t svqshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x)))
+svint16_t svqshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z)))
+svint8_t svqshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z)))
+svint32_t svqshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z)))
+svint64_t svqshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z)))
+svint16_t svqshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m)))
+svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m)))
+svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m)))
+svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m)))
+svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x)))
+svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x)))
+svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x)))
+svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x)))
+svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z)))
+svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z)))
+svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z)))
+svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z)))
+svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m)))
+svint8_t svqshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m)))
+svint32_t svqshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m)))
+svint64_t svqshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m)))
+svint16_t svqshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x)))
+svint8_t svqshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x)))
+svint32_t svqshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x)))
+svint64_t svqshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x)))
+svint16_t svqshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z)))
+svint8_t svqshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z)))
+svint32_t svqshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z)))
+svint64_t svqshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z)))
+svint16_t svqshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m)))
+svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m)))
+svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m)))
+svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m)))
+svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x)))
+svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x)))
+svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x)))
+svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x)))
+svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z)))
+svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z)))
+svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z)))
+svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z)))
+svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m)))
+svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m)))
+svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m)))
+svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m)))
+svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x)))
+svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x)))
+svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x)))
+svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x)))
+svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z)))
+svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z)))
+svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z)))
+svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z)))
+svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32)))
+svint16_t svqshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64)))
+svint32_t svqshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16)))
+svint8_t svqshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32)))
+svuint16_t svqshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64)))
+svuint32_t svqshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16)))
+svuint8_t svqshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32)))
+svint16_t svqshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64)))
+svint32_t svqshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16)))
+svint8_t svqshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32)))
+svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64)))
+svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16)))
+svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32)))
+svuint16_t svqshrunb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64)))
+svuint32_t svqshrunb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16)))
+svuint8_t svqshrunb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32)))
+svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64)))
+svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16)))
+svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m)))
+svint8_t svqsub_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m)))
+svint32_t svqsub_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m)))
+svint64_t svqsub_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m)))
+svint16_t svqsub_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x)))
+svint8_t svqsub_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x)))
+svint32_t svqsub_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x)))
+svint64_t svqsub_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x)))
+svint16_t svqsub_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z)))
+svint8_t svqsub_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z)))
+svint32_t svqsub_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z)))
+svint64_t svqsub_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z)))
+svint16_t svqsub_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m)))
+svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m)))
+svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m)))
+svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m)))
+svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x)))
+svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x)))
+svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x)))
+svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x)))
+svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z)))
+svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z)))
+svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z)))
+svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z)))
+svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m)))
+svint8_t svqsub_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m)))
+svint32_t svqsub_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m)))
+svint64_t svqsub_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m)))
+svint16_t svqsub_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x)))
+svint8_t svqsub_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x)))
+svint32_t svqsub_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x)))
+svint64_t svqsub_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x)))
+svint16_t svqsub_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z)))
+svint8_t svqsub_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z)))
+svint32_t svqsub_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z)))
+svint64_t svqsub_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z)))
+svint16_t svqsub_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m)))
+svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m)))
+svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m)))
+svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m)))
+svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x)))
+svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x)))
+svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x)))
+svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x)))
+svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z)))
+svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z)))
+svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z)))
+svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z)))
+svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m)))
+svint8_t svqsubr_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m)))
+svint32_t svqsubr_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m)))
+svint64_t svqsubr_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m)))
+svint16_t svqsubr_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x)))
+svint8_t svqsubr_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x)))
+svint32_t svqsubr_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x)))
+svint64_t svqsubr_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x)))
+svint16_t svqsubr_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z)))
+svint8_t svqsubr_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z)))
+svint32_t svqsubr_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z)))
+svint64_t svqsubr_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z)))
+svint16_t svqsubr_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m)))
+svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m)))
+svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m)))
+svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m)))
+svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x)))
+svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x)))
+svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x)))
+svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m)))
+svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m)))
+svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m)))
+svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m)))
+svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x)))
+svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x)))
+svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x)))
+svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x)))
+svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z)))
+svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z)))
+svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z)))
+svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z)))
+svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m)))
+svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m)))
+svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m)))
+svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m)))
+svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x)))
+svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x)))
+svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x)))
+svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x)))
+svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z)))
+svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z)))
+svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z)))
+svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z)))
+svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32)))
+svint16_t svqxtnb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64)))
+svint32_t svqxtnb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16)))
+svint8_t svqxtnb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32)))
+svuint16_t svqxtnb(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64)))
+svuint32_t svqxtnb(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16)))
+svuint8_t svqxtnb(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32)))
+svint16_t svqxtnt(svint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64)))
+svint32_t svqxtnt(svint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16)))
+svint8_t svqxtnt(svint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32)))
+svuint16_t svqxtnt(svuint16_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64)))
+svuint32_t svqxtnt(svuint32_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16)))
+svuint8_t svqxtnt(svuint8_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32)))
+svuint16_t svqxtunb(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64)))
+svuint32_t svqxtunb(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16)))
+svuint8_t svqxtunb(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32)))
+svuint16_t svqxtunt(svuint16_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64)))
+svuint32_t svqxtunt(svuint32_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16)))
+svuint8_t svqxtunt(svuint8_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32)))
+svuint16_t svraddhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64)))
+svuint32_t svraddhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16)))
+svuint8_t svraddhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32)))
+svint16_t svraddhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64)))
+svint32_t svraddhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16)))
+svint8_t svraddhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32)))
+svuint16_t svraddhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64)))
+svuint32_t svraddhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16)))
+svuint8_t svraddhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32)))
+svint16_t svraddhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64)))
+svint32_t svraddhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16)))
+svint8_t svraddhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32)))
+svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64)))
+svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16)))
+svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32)))
+svint16_t svraddhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64)))
+svint32_t svraddhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16)))
+svint8_t svraddhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32)))
+svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64)))
+svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16)))
+svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32)))
+svint16_t svraddhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64)))
+svint32_t svraddhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16)))
+svint8_t svraddhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m)))
+svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x)))
+svuint32_t svrecpe_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z)))
+svuint32_t svrecpe_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m)))
+svint8_t svrhadd_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m)))
+svint32_t svrhadd_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m)))
+svint64_t svrhadd_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m)))
+svint16_t svrhadd_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x)))
+svint8_t svrhadd_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x)))
+svint32_t svrhadd_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x)))
+svint64_t svrhadd_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x)))
+svint16_t svrhadd_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z)))
+svint8_t svrhadd_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z)))
+svint32_t svrhadd_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z)))
+svint64_t svrhadd_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z)))
+svint16_t svrhadd_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m)))
+svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m)))
+svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m)))
+svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m)))
+svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x)))
+svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x)))
+svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x)))
+svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x)))
+svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z)))
+svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z)))
+svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z)))
+svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z)))
+svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m)))
+svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m)))
+svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m)))
+svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m)))
+svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x)))
+svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x)))
+svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x)))
+svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x)))
+svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z)))
+svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z)))
+svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z)))
+svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z)))
+svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m)))
+svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m)))
+svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m)))
+svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m)))
+svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x)))
+svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x)))
+svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x)))
+svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x)))
+svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z)))
+svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z)))
+svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z)))
+svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z)))
+svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m)))
+svint8_t svrshl_m(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m)))
+svint32_t svrshl_m(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m)))
+svint64_t svrshl_m(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m)))
+svint16_t svrshl_m(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x)))
+svint8_t svrshl_x(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x)))
+svint32_t svrshl_x(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x)))
+svint64_t svrshl_x(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x)))
+svint16_t svrshl_x(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z)))
+svint8_t svrshl_z(svbool_t, svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z)))
+svint32_t svrshl_z(svbool_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z)))
+svint64_t svrshl_z(svbool_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z)))
+svint16_t svrshl_z(svbool_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m)))
+svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m)))
+svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m)))
+svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m)))
+svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x)))
+svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x)))
+svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x)))
+svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x)))
+svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z)))
+svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z)))
+svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z)))
+svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z)))
+svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m)))
+svint8_t svrshl_m(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m)))
+svint32_t svrshl_m(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m)))
+svint64_t svrshl_m(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m)))
+svint16_t svrshl_m(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x)))
+svint8_t svrshl_x(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x)))
+svint32_t svrshl_x(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x)))
+svint64_t svrshl_x(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x)))
+svint16_t svrshl_x(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z)))
+svint8_t svrshl_z(svbool_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z)))
+svint32_t svrshl_z(svbool_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z)))
+svint64_t svrshl_z(svbool_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z)))
+svint16_t svrshl_z(svbool_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m)))
+svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m)))
+svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m)))
+svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m)))
+svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x)))
+svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x)))
+svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x)))
+svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x)))
+svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z)))
+svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z)))
+svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z)))
+svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z)))
+svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m)))
+svint8_t svrshr_m(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m)))
+svint32_t svrshr_m(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m)))
+svint64_t svrshr_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m)))
+svint16_t svrshr_m(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m)))
+svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m)))
+svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m)))
+svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m)))
+svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x)))
+svint8_t svrshr_x(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x)))
+svint32_t svrshr_x(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x)))
+svint64_t svrshr_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x)))
+svint16_t svrshr_x(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x)))
+svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x)))
+svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x)))
+svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x)))
+svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z)))
+svint8_t svrshr_z(svbool_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z)))
+svint32_t svrshr_z(svbool_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z)))
+svint64_t svrshr_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z)))
+svint16_t svrshr_z(svbool_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z)))
+svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z)))
+svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z)))
+svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z)))
+svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32)))
+svuint16_t svrshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64)))
+svuint32_t svrshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16)))
+svuint8_t svrshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32)))
+svint16_t svrshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64)))
+svint32_t svrshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16)))
+svint8_t svrshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32)))
+svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64)))
+svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16)))
+svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32)))
+svint16_t svrshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64)))
+svint32_t svrshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16)))
+svint8_t svrshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m)))
+svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x)))
+svuint32_t svrsqrte_x(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z)))
+svuint32_t svrsqrte_z(svbool_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8)))
+svint8_t svrsra(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32)))
+svint32_t svrsra(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64)))
+svint64_t svrsra(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16)))
+svint16_t svrsra(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8)))
+svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32)))
+svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64)))
+svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16)))
+svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32)))
+svuint16_t svrsubhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64)))
+svuint32_t svrsubhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16)))
+svuint8_t svrsubhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32)))
+svint16_t svrsubhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64)))
+svint32_t svrsubhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16)))
+svint8_t svrsubhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32)))
+svuint16_t svrsubhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64)))
+svuint32_t svrsubhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16)))
+svuint8_t svrsubhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32)))
+svint16_t svrsubhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64)))
+svint32_t svrsubhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16)))
+svint8_t svrsubhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32)))
+svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64)))
+svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16)))
+svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32)))
+svint16_t svrsubhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64)))
+svint32_t svrsubhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16)))
+svint8_t svrsubhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32)))
+svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64)))
+svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16)))
+svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32)))
+svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64)))
+svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16)))
+svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32)))
+svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64)))
+svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32)))
+svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64)))
+svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32)))
+svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64)))
+svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32)))
+svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64)))
+svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32)))
+svint32_t svshllb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64)))
+svint64_t svshllb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16)))
+svint16_t svshllb(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32)))
+svuint32_t svshllb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64)))
+svuint64_t svshllb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16)))
+svuint16_t svshllb(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32)))
+svint32_t svshllt(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64)))
+svint64_t svshllt(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16)))
+svint16_t svshllt(svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32)))
+svuint32_t svshllt(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64)))
+svuint64_t svshllt(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16)))
+svuint16_t svshllt(svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32)))
+svuint16_t svshrnb(svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64)))
+svuint32_t svshrnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16)))
+svuint8_t svshrnb(svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32)))
+svint16_t svshrnb(svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64)))
+svint32_t svshrnb(svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16)))
+svint8_t svshrnb(svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32)))
+svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64)))
+svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16)))
+svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32)))
+svint16_t svshrnt(svint16_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64)))
+svint32_t svshrnt(svint32_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16)))
+svint8_t svshrnt(svint8_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8)))
+svuint8_t svsli(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32)))
+svuint32_t svsli(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64)))
+svuint64_t svsli(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16)))
+svuint16_t svsli(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8)))
+svint8_t svsli(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32)))
+svint32_t svsli(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64)))
+svint64_t svsli(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16)))
+svint16_t svsli(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m)))
+svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m)))
+svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m)))
+svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m)))
+svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x)))
+svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x)))
+svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x)))
+svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x)))
+svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z)))
+svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z)))
+svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z)))
+svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z)))
+svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m)))
+svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m)))
+svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m)))
+svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m)))
+svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x)))
+svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x)))
+svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x)))
+svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x)))
+svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z)))
+svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z)))
+svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z)))
+svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z)))
+svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8)))
+svint8_t svsra(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32)))
+svint32_t svsra(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64)))
+svint64_t svsra(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16)))
+svint16_t svsra(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8)))
+svuint8_t svsra(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32)))
+svuint32_t svsra(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64)))
+svuint64_t svsra(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16)))
+svuint16_t svsra(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8)))
+svuint8_t svsri(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32)))
+svuint32_t svsri(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64)))
+svuint64_t svsri(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16)))
+svuint16_t svsri(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8)))
+svint8_t svsri(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32)))
+svint32_t svsri(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64)))
+svint64_t svsri(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16)))
+svint16_t svsri(svint16_t, svint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32)))
+void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64)))
+void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32)))
+void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64)))
+void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32)))
+void svstnt1_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64)))
+void svstnt1_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64)))
+void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64)))
+void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64)))
+void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64)))
+void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64)))
+void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64)))
+void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32)))
+void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32)))
+void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32)))
+void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64)))
+void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64)))
+void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64)))
+void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64)))
+void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64)))
+void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64)))
+void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32)))
+void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32)))
+void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32)))
+void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64)))
+void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32)))
+void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64)))
+void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64)))
+void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64)))
+void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32)))
+void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64)))
+void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32)))
+void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64)))
+void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32)))
+void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32)))
+void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32)))
+void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64)))
+void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32)))
+void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64)))
+void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64)))
+void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64)))
+void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64)))
+void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64)))
+void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64)))
+void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64)))
+void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64)))
+void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64)))
+void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64)))
+void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64)))
+void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64)))
+void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64)))
+void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64)))
+void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64)))
+void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64)))
+void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64)))
+void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32)))
+svuint16_t svsubhnb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64)))
+svuint32_t svsubhnb(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16)))
+svuint8_t svsubhnb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32)))
+svint16_t svsubhnb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64)))
+svint32_t svsubhnb(svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16)))
+svint8_t svsubhnb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32)))
+svuint16_t svsubhnb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64)))
+svuint32_t svsubhnb(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16)))
+svuint8_t svsubhnb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32)))
+svint16_t svsubhnb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64)))
+svint32_t svsubhnb(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16)))
+svint8_t svsubhnb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32)))
+svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64)))
+svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16)))
+svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32)))
+svint16_t svsubhnt(svint16_t, svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64)))
+svint32_t svsubhnt(svint32_t, svint64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16)))
+svint8_t svsubhnt(svint8_t, svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32)))
+svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64)))
+svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16)))
+svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32)))
+svint16_t svsubhnt(svint16_t, svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64)))
+svint32_t svsubhnt(svint32_t, svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16)))
+svint8_t svsubhnt(svint8_t, svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32)))
+svint32_t svsublb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64)))
+svint64_t svsublb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16)))
+svint16_t svsublb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32)))
+svuint32_t svsublb(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64)))
+svuint64_t svsublb(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16)))
+svuint16_t svsublb(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32)))
+svint32_t svsublb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64)))
+svint64_t svsublb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16)))
+svint16_t svsublb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32)))
+svuint32_t svsublb(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64)))
+svuint64_t svsublb(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16)))
+svuint16_t svsublb(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32)))
+svint32_t svsublbt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64)))
+svint64_t svsublbt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16)))
+svint16_t svsublbt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32)))
+svint32_t svsublbt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64)))
+svint64_t svsublbt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16)))
+svint16_t svsublbt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32)))
+svint32_t svsublt(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64)))
+svint64_t svsublt(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16)))
+svint16_t svsublt(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32)))
+svuint32_t svsublt(svuint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64)))
+svuint64_t svsublt(svuint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16)))
+svuint16_t svsublt(svuint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32)))
+svint32_t svsublt(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64)))
+svint64_t svsublt(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16)))
+svint16_t svsublt(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32)))
+svuint32_t svsublt(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64)))
+svuint64_t svsublt(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16)))
+svuint16_t svsublt(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32)))
+svint32_t svsubltb(svint16_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64)))
+svint64_t svsubltb(svint32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16)))
+svint16_t svsubltb(svint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32)))
+svint32_t svsubltb(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64)))
+svint64_t svsubltb(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16)))
+svint16_t svsubltb(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32)))
+svint32_t svsubwb(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64)))
+svint64_t svsubwb(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16)))
+svint16_t svsubwb(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32)))
+svuint32_t svsubwb(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64)))
+svuint64_t svsubwb(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16)))
+svuint16_t svsubwb(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32)))
+svint32_t svsubwb(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64)))
+svint64_t svsubwb(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16)))
+svint16_t svsubwb(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32)))
+svuint32_t svsubwb(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64)))
+svuint64_t svsubwb(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16)))
+svuint16_t svsubwb(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32)))
+svint32_t svsubwt(svint32_t, int16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64)))
+svint64_t svsubwt(svint64_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16)))
+svint16_t svsubwt(svint16_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32)))
+svuint32_t svsubwt(svuint32_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64)))
+svuint64_t svsubwt(svuint64_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16)))
+svuint16_t svsubwt(svuint16_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32)))
+svint32_t svsubwt(svint32_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64)))
+svint64_t svsubwt(svint64_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16)))
+svint16_t svsubwt(svint16_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32)))
+svuint32_t svsubwt(svuint32_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64)))
+svuint64_t svsubwt(svuint64_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16)))
+svuint16_t svsubwt(svuint16_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8)))
+svuint8_t svtbl2(svuint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32)))
+svuint32_t svtbl2(svuint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64)))
+svuint64_t svtbl2(svuint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16)))
+svuint16_t svtbl2(svuint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8)))
+svint8_t svtbl2(svint8x2_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64)))
+svfloat64_t svtbl2(svfloat64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32)))
+svfloat32_t svtbl2(svfloat32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16)))
+svfloat16_t svtbl2(svfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32)))
+svint32_t svtbl2(svint32x2_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64)))
+svint64_t svtbl2(svint64x2_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16)))
+svint16_t svtbl2(svint16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8)))
+svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32)))
+svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64)))
+svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16)))
+svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8)))
+svint8_t svtbx(svint8_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64)))
+svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32)))
+svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16)))
+svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32)))
+svint32_t svtbx(svint32_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64)))
+svint64_t svtbx(svint64_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16)))
+svint16_t svtbx(svint16_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m)))
+svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m)))
+svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m)))
+svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m)))
+svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x)))
+svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x)))
+svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x)))
+svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x)))
+svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z)))
+svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z)))
+svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z)))
+svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z)))
+svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32)))
+svbool_t svwhilege_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32)))
+svbool_t svwhilege_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32)))
+svbool_t svwhilege_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32)))
+svbool_t svwhilege_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64)))
+svbool_t svwhilege_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64)))
+svbool_t svwhilege_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64)))
+svbool_t svwhilege_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64)))
+svbool_t svwhilege_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32)))
+svbool_t svwhilege_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32)))
+svbool_t svwhilege_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32)))
+svbool_t svwhilege_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32)))
+svbool_t svwhilege_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64)))
+svbool_t svwhilege_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64)))
+svbool_t svwhilege_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64)))
+svbool_t svwhilege_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64)))
+svbool_t svwhilege_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32)))
+svbool_t svwhilegt_b8(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32)))
+svbool_t svwhilegt_b32(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32)))
+svbool_t svwhilegt_b64(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32)))
+svbool_t svwhilegt_b16(int32_t, int32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64)))
+svbool_t svwhilegt_b8(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64)))
+svbool_t svwhilegt_b32(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64)))
+svbool_t svwhilegt_b64(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64)))
+svbool_t svwhilegt_b16(int64_t, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32)))
+svbool_t svwhilegt_b8(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32)))
+svbool_t svwhilegt_b32(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32)))
+svbool_t svwhilegt_b64(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32)))
+svbool_t svwhilegt_b16(uint32_t, uint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64)))
+svbool_t svwhilegt_b8(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64)))
+svbool_t svwhilegt_b32(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64)))
+svbool_t svwhilegt_b64(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64)))
+svbool_t svwhilegt_b16(uint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8)))
+svbool_t svwhilerw(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8)))
+svbool_t svwhilerw(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64)))
+svbool_t svwhilerw(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64)))
+svbool_t svwhilerw(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64)))
+svbool_t svwhilerw(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16)))
+svbool_t svwhilerw(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16)))
+svbool_t svwhilerw(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16)))
+svbool_t svwhilerw(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32)))
+svbool_t svwhilerw(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32)))
+svbool_t svwhilerw(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32)))
+svbool_t svwhilerw(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8)))
+svbool_t svwhilewr(uint8_t const *, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8)))
+svbool_t svwhilewr(int8_t const *, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64)))
+svbool_t svwhilewr(uint64_t const *, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64)))
+svbool_t svwhilewr(float64_t const *, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64)))
+svbool_t svwhilewr(int64_t const *, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16)))
+svbool_t svwhilewr(uint16_t const *, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16)))
+svbool_t svwhilewr(float16_t const *, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16)))
+svbool_t svwhilewr(int16_t const *, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32)))
+svbool_t svwhilewr(uint32_t const *, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32)))
+svbool_t svwhilewr(float32_t const *, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32)))
+svbool_t svwhilewr(int32_t const *, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8)))
+svuint8_t svxar(svuint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32)))
+svuint32_t svxar(svuint32_t, svuint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64)))
+svuint64_t svxar(svuint64_t, svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16)))
+svuint16_t svxar(svuint16_t, svuint16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8)))
+svint8_t svxar(svint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32)))
+svint32_t svxar(svint32_t, svint32_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64)))
+svint64_t svxar(svint64_t, svint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16)))
+svint16_t svxar(svint16_t, svint16_t, uint64_t);
+#endif  //defined(__ARM_FEATURE_SVE2)
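+
+// Illustrative sketch (not part of the generated header): the svwhilerw /
+// svwhilewr overloads above return a predicate limiting an iteration to the
+// elements that are free of read-after-write / write-after-read hazards
+// between two possibly overlapping pointers. A hypothetical in-place-safe
+// scaling loop:
+//
+//   void scale2x(float *dst, const float *src, int64_t n) {
+//     for (int64_t i = 0; i < n;) {
+//       svbool_t pg = svand_z(svptrue_b32(), svwhilelt_b32(i, n),
+//                             svwhilerw(src + i, dst + i));
+//       svst1(pg, dst + i, svmul_x(pg, svld1(pg, src + i), 2.0f));
+//       i += svcntp_b32(pg, pg);
+//     }
+//   }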
+
+#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+#define svwhilerw_bf16(...) __builtin_sve_svwhilerw_bf16(__VA_ARGS__)
+#define svwhilewr_bf16(...) __builtin_sve_svwhilewr_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16)))
+svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16)))
+svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *);
+#endif  //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)
+
+#if defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
+#define svtbl2_bf16(...) __builtin_sve_svtbl2_bf16(__VA_ARGS__)
+#define svtbx_bf16(...) __builtin_sve_svtbx_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16)))
+svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16)))
+svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t);
+#endif  //defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE2_AES)
+#define svaesd_u8(...) __builtin_sve_svaesd_u8(__VA_ARGS__)
+#define svaese_u8(...) __builtin_sve_svaese_u8(__VA_ARGS__)
+#define svaesimc_u8(...) __builtin_sve_svaesimc_u8(__VA_ARGS__)
+#define svaesmc_u8(...) __builtin_sve_svaesmc_u8(__VA_ARGS__)
+#define svpmullb_pair_n_u64(...) __builtin_sve_svpmullb_pair_n_u64(__VA_ARGS__)
+#define svpmullb_pair_u64(...) __builtin_sve_svpmullb_pair_u64(__VA_ARGS__)
+#define svpmullt_pair_n_u64(...) __builtin_sve_svpmullt_pair_n_u64(__VA_ARGS__)
+#define svpmullt_pair_u64(...) __builtin_sve_svpmullt_pair_u64(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8)))
+svuint8_t svaesd(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8)))
+svuint8_t svaese(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8)))
+svuint8_t svaesimc(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8)))
+svuint8_t svaesmc(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64)))
+svuint64_t svpmullb_pair(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64)))
+svuint64_t svpmullb_pair(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64)))
+svuint64_t svpmullt_pair(svuint64_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64)))
+svuint64_t svpmullt_pair(svuint64_t, svuint64_t);
+#endif  //defined(__ARM_FEATURE_SVE2_AES)
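+
+// Illustrative sketch (assumption, mirroring the Neon AES intrinsics): a full
+// AES encryption round composes AESE with MixColumns, and the final round
+// omits the MixColumns step:
+//
+//   svuint8_t aes_enc_round(svuint8_t state, svuint8_t round_key) {
+//     return svaesmc(svaese(state, round_key));
+//   }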
+
+#if defined(__ARM_FEATURE_SVE2_SHA3)
+#define svrax1_u64(...) __builtin_sve_svrax1_u64(__VA_ARGS__)
+#define svrax1_s64(...) __builtin_sve_svrax1_s64(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64)))
+svuint64_t svrax1(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64)))
+svint64_t svrax1(svint64_t, svint64_t);
+#endif  //defined(__ARM_FEATURE_SVE2_SHA3)
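+
+// Illustrative note (assumption): svrax1 computes op1 ^ rol64(op2, 1) per
+// 64-bit element, the rotate-and-xor step of the SHA-3 theta function:
+//
+//   svuint64_t theta_d(svuint64_t c_prev, svuint64_t c_next) {
+//     return svrax1(c_prev, c_next);   // D = C[x-1] ^ rol(C[x+1], 1)
+//   }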
+
+#if defined(__ARM_FEATURE_SVE2_SM4)
+#define svsm4e_u32(...) __builtin_sve_svsm4e_u32(__VA_ARGS__)
+#define svsm4ekey_u32(...) __builtin_sve_svsm4ekey_u32(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
+svuint32_t svsm4e(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
+svuint32_t svsm4ekey(svuint32_t, svuint32_t);
+#endif  //defined(__ARM_FEATURE_SVE2_SM4)
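+
+// Illustrative note (assumption): svsm4e applies SM4 encryption rounds to each
+// 128-bit segment of its first operand using round keys taken from the second,
+// and svsm4ekey derives the next four round keys from the previous ones.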
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svbfdot_n_f32(...) __builtin_sve_svbfdot_n_f32(__VA_ARGS__)
+#define svbfdot_f32(...) __builtin_sve_svbfdot_f32(__VA_ARGS__)
+#define svbfdot_lane_f32(...) __builtin_sve_svbfdot_lane_f32(__VA_ARGS__)
+#define svbfmlalb_n_f32(...) __builtin_sve_svbfmlalb_n_f32(__VA_ARGS__)
+#define svbfmlalb_f32(...) __builtin_sve_svbfmlalb_f32(__VA_ARGS__)
+#define svbfmlalb_lane_f32(...) __builtin_sve_svbfmlalb_lane_f32(__VA_ARGS__)
+#define svbfmlalt_n_f32(...) __builtin_sve_svbfmlalt_n_f32(__VA_ARGS__)
+#define svbfmlalt_f32(...) __builtin_sve_svbfmlalt_f32(__VA_ARGS__)
+#define svbfmlalt_lane_f32(...) __builtin_sve_svbfmlalt_lane_f32(__VA_ARGS__)
+#define svbfmmla_f32(...) __builtin_sve_svbfmmla_f32(__VA_ARGS__)
+#define svclasta_n_bf16(...) __builtin_sve_svclasta_n_bf16(__VA_ARGS__)
+#define svclasta_bf16(...) __builtin_sve_svclasta_bf16(__VA_ARGS__)
+#define svclastb_n_bf16(...) __builtin_sve_svclastb_n_bf16(__VA_ARGS__)
+#define svclastb_bf16(...) __builtin_sve_svclastb_bf16(__VA_ARGS__)
+#define svcnt_bf16_m(...) __builtin_sve_svcnt_bf16_m(__VA_ARGS__)
+#define svcnt_bf16_x(...) __builtin_sve_svcnt_bf16_x(__VA_ARGS__)
+#define svcnt_bf16_z(...) __builtin_sve_svcnt_bf16_z(__VA_ARGS__)
+#define svcreate2_bf16(...) __builtin_sve_svcreate2_bf16(__VA_ARGS__)
+#define svcreate3_bf16(...) __builtin_sve_svcreate3_bf16(__VA_ARGS__)
+#define svcreate4_bf16(...) __builtin_sve_svcreate4_bf16(__VA_ARGS__)
+#define svcvt_bf16_f32_m(...) __builtin_sve_svcvt_bf16_f32_m(__VA_ARGS__)
+#define svcvt_bf16_f32_x(...) __builtin_sve_svcvt_bf16_f32_x(__VA_ARGS__)
+#define svcvt_bf16_f32_z(...) __builtin_sve_svcvt_bf16_f32_z(__VA_ARGS__)
+#define svcvtnt_bf16_f32_m(...) __builtin_sve_svcvtnt_bf16_f32_m(__VA_ARGS__)
+#define svdup_n_bf16(...) __builtin_sve_svdup_n_bf16(__VA_ARGS__)
+#define svdup_n_bf16_m(...) __builtin_sve_svdup_n_bf16_m(__VA_ARGS__)
+#define svdup_n_bf16_x(...) __builtin_sve_svdup_n_bf16_x(__VA_ARGS__)
+#define svdup_n_bf16_z(...) __builtin_sve_svdup_n_bf16_z(__VA_ARGS__)
+#define svdup_lane_bf16(...) __builtin_sve_svdup_lane_bf16(__VA_ARGS__)
+#define svdupq_n_bf16(...) __builtin_sve_svdupq_n_bf16(__VA_ARGS__)
+#define svdupq_lane_bf16(...) __builtin_sve_svdupq_lane_bf16(__VA_ARGS__)
+#define svext_bf16(...) __builtin_sve_svext_bf16(__VA_ARGS__)
+#define svget2_bf16(...) __builtin_sve_svget2_bf16(__VA_ARGS__)
+#define svget3_bf16(...) __builtin_sve_svget3_bf16(__VA_ARGS__)
+#define svget4_bf16(...) __builtin_sve_svget4_bf16(__VA_ARGS__)
+#define svinsr_n_bf16(...) __builtin_sve_svinsr_n_bf16(__VA_ARGS__)
+#define svlasta_bf16(...) __builtin_sve_svlasta_bf16(__VA_ARGS__)
+#define svlastb_bf16(...) __builtin_sve_svlastb_bf16(__VA_ARGS__)
+#define svld1_bf16(...) __builtin_sve_svld1_bf16(__VA_ARGS__)
+#define svld1_vnum_bf16(...) __builtin_sve_svld1_vnum_bf16(__VA_ARGS__)
+#define svld1rq_bf16(...) __builtin_sve_svld1rq_bf16(__VA_ARGS__)
+#define svld2_bf16(...) __builtin_sve_svld2_bf16(__VA_ARGS__)
+#define svld2_vnum_bf16(...) __builtin_sve_svld2_vnum_bf16(__VA_ARGS__)
+#define svld3_bf16(...) __builtin_sve_svld3_bf16(__VA_ARGS__)
+#define svld3_vnum_bf16(...) __builtin_sve_svld3_vnum_bf16(__VA_ARGS__)
+#define svld4_bf16(...) __builtin_sve_svld4_bf16(__VA_ARGS__)
+#define svld4_vnum_bf16(...) __builtin_sve_svld4_vnum_bf16(__VA_ARGS__)
+#define svldff1_bf16(...) __builtin_sve_svldff1_bf16(__VA_ARGS__)
+#define svldff1_vnum_bf16(...) __builtin_sve_svldff1_vnum_bf16(__VA_ARGS__)
+#define svldnf1_bf16(...) __builtin_sve_svldnf1_bf16(__VA_ARGS__)
+#define svldnf1_vnum_bf16(...) __builtin_sve_svldnf1_vnum_bf16(__VA_ARGS__)
+#define svldnt1_bf16(...) __builtin_sve_svldnt1_bf16(__VA_ARGS__)
+#define svldnt1_vnum_bf16(...) __builtin_sve_svldnt1_vnum_bf16(__VA_ARGS__)
+#define svlen_bf16(...) __builtin_sve_svlen_bf16(__VA_ARGS__)
+#define svrev_bf16(...) __builtin_sve_svrev_bf16(__VA_ARGS__)
+#define svsel_bf16(...) __builtin_sve_svsel_bf16(__VA_ARGS__)
+#define svset2_bf16(...) __builtin_sve_svset2_bf16(__VA_ARGS__)
+#define svset3_bf16(...) __builtin_sve_svset3_bf16(__VA_ARGS__)
+#define svset4_bf16(...) __builtin_sve_svset4_bf16(__VA_ARGS__)
+#define svsplice_bf16(...) __builtin_sve_svsplice_bf16(__VA_ARGS__)
+#define svst1_bf16(...) __builtin_sve_svst1_bf16(__VA_ARGS__)
+#define svst1_vnum_bf16(...) __builtin_sve_svst1_vnum_bf16(__VA_ARGS__)
+#define svst2_bf16(...) __builtin_sve_svst2_bf16(__VA_ARGS__)
+#define svst2_vnum_bf16(...) __builtin_sve_svst2_vnum_bf16(__VA_ARGS__)
+#define svst3_bf16(...) __builtin_sve_svst3_bf16(__VA_ARGS__)
+#define svst3_vnum_bf16(...) __builtin_sve_svst3_vnum_bf16(__VA_ARGS__)
+#define svst4_bf16(...) __builtin_sve_svst4_bf16(__VA_ARGS__)
+#define svst4_vnum_bf16(...) __builtin_sve_svst4_vnum_bf16(__VA_ARGS__)
+#define svstnt1_bf16(...) __builtin_sve_svstnt1_bf16(__VA_ARGS__)
+#define svstnt1_vnum_bf16(...) __builtin_sve_svstnt1_vnum_bf16(__VA_ARGS__)
+#define svtbl_bf16(...) __builtin_sve_svtbl_bf16(__VA_ARGS__)
+#define svtrn1_bf16(...) __builtin_sve_svtrn1_bf16(__VA_ARGS__)
+#define svtrn2_bf16(...) __builtin_sve_svtrn2_bf16(__VA_ARGS__)
+#define svundef2_bf16(...) __builtin_sve_svundef2_bf16(__VA_ARGS__)
+#define svundef3_bf16(...) __builtin_sve_svundef3_bf16(__VA_ARGS__)
+#define svundef4_bf16(...) __builtin_sve_svundef4_bf16(__VA_ARGS__)
+#define svundef_bf16(...) __builtin_sve_svundef_bf16(__VA_ARGS__)
+#define svuzp1_bf16(...) __builtin_sve_svuzp1_bf16(__VA_ARGS__)
+#define svuzp2_bf16(...) __builtin_sve_svuzp2_bf16(__VA_ARGS__)
+#define svzip1_bf16(...) __builtin_sve_svzip1_bf16(__VA_ARGS__)
+#define svzip2_bf16(...) __builtin_sve_svzip2_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32)))
+svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32)))
+svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32)))
+svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32)))
+svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32)))
+svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32)))
+svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32)))
+svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32)))
+svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32)))
+svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32)))
+svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16)))
+bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16)))
+svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16)))
+bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16)))
+svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m)))
+svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x)))
+svuint16_t svcnt_x(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z)))
+svuint16_t svcnt_z(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16)))
+svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16)))
+svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16)))
+svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m)))
+svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x)))
+svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z)))
+svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m)))
+svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16)))
+svbfloat16_t svdup_bf16(bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m)))
+svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x)))
+svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z)))
+svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16)))
+svbfloat16_t svdup_lane(svbfloat16_t, uint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16)))
+svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16)))
+svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16)))
+svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16)))
+svbfloat16_t svget2(svbfloat16x2_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16)))
+svbfloat16_t svget3(svbfloat16x3_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16)))
+svbfloat16_t svget4(svbfloat16x4_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16)))
+svbfloat16_t svinsr(svbfloat16_t, bfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16)))
+bfloat16_t svlasta(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16)))
+bfloat16_t svlastb(svbool_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16)))
+svbfloat16_t svld1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16)))
+svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16)))
+svbfloat16_t svld1rq(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16)))
+svbfloat16x2_t svld2(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16)))
+svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16)))
+svbfloat16x3_t svld3(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16)))
+svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16)))
+svbfloat16x4_t svld4(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16)))
+svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16)))
+svbfloat16_t svldff1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16)))
+svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16)))
+svbfloat16_t svldnf1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16)))
+svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16)))
+svbfloat16_t svldnt1(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16)))
+svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16)))
+uint64_t svlen(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16)))
+svbfloat16_t svrev(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16)))
+svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16)))
+svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16)))
+svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16)))
+svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16)))
+svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16)))
+void svst1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16)))
+void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16)))
+void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16)))
+void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16)))
+void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16)))
+void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16)))
+void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16)))
+void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16)))
+void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16)))
+void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16)))
+svbfloat16_t svtbl(svbfloat16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16)))
+svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16)))
+svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16)))
+svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16)))
+svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16)))
+svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16)))
+svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t);
+#endif  //defined(__ARM_FEATURE_SVE_BF16)
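+
+// Illustrative sketch (not part of the generated header): svbfdot widens pairs
+// of BFloat16 elements, multiplies them, and accumulates into 32-bit float
+// lanes, so a mixed-precision dot product reduces to:
+//
+//   float bf16_dot(const bfloat16_t *a, const bfloat16_t *b, int64_t n) {
+//     svfloat32_t acc = svdup_f32(0.0f);
+//     for (int64_t i = 0; i < n; i += svcnth()) {  // svcnth(): bf16 lane count
+//       svbool_t pg = svwhilelt_b16(i, n);         // inactive lanes load as 0
+//       acc = svbfdot(acc, svld1(pg, a + i), svld1(pg, b + i));
+//     }
+//     return svaddv(svptrue_b32(), acc);
+//   }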
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP32)
+#define svmmla_f32(...) __builtin_sve_svmmla_f32(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32)))
+svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP32)
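+
+// Illustrative note (assumption): svmmla_f32 maps to the FP32 FMMLA
+// instruction, performing an independent 2x2 * 2x2 matrix multiply-accumulate
+// within each 128-bit segment of the accumulator vector.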
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP64)
+#define svld1ro_u8(...) __builtin_sve_svld1ro_u8(__VA_ARGS__)
+#define svld1ro_u32(...) __builtin_sve_svld1ro_u32(__VA_ARGS__)
+#define svld1ro_u64(...) __builtin_sve_svld1ro_u64(__VA_ARGS__)
+#define svld1ro_u16(...) __builtin_sve_svld1ro_u16(__VA_ARGS__)
+#define svld1ro_s8(...) __builtin_sve_svld1ro_s8(__VA_ARGS__)
+#define svld1ro_f64(...) __builtin_sve_svld1ro_f64(__VA_ARGS__)
+#define svld1ro_f32(...) __builtin_sve_svld1ro_f32(__VA_ARGS__)
+#define svld1ro_f16(...) __builtin_sve_svld1ro_f16(__VA_ARGS__)
+#define svld1ro_s32(...) __builtin_sve_svld1ro_s32(__VA_ARGS__)
+#define svld1ro_s64(...) __builtin_sve_svld1ro_s64(__VA_ARGS__)
+#define svld1ro_s16(...) __builtin_sve_svld1ro_s16(__VA_ARGS__)
+#define svmmla_f64(...) __builtin_sve_svmmla_f64(__VA_ARGS__)
+#define svtrn1q_u8(...) __builtin_sve_svtrn1q_u8(__VA_ARGS__)
+#define svtrn1q_u32(...) __builtin_sve_svtrn1q_u32(__VA_ARGS__)
+#define svtrn1q_u64(...) __builtin_sve_svtrn1q_u64(__VA_ARGS__)
+#define svtrn1q_u16(...) __builtin_sve_svtrn1q_u16(__VA_ARGS__)
+#define svtrn1q_s8(...) __builtin_sve_svtrn1q_s8(__VA_ARGS__)
+#define svtrn1q_f64(...) __builtin_sve_svtrn1q_f64(__VA_ARGS__)
+#define svtrn1q_f32(...) __builtin_sve_svtrn1q_f32(__VA_ARGS__)
+#define svtrn1q_f16(...) __builtin_sve_svtrn1q_f16(__VA_ARGS__)
+#define svtrn1q_s32(...) __builtin_sve_svtrn1q_s32(__VA_ARGS__)
+#define svtrn1q_s64(...) __builtin_sve_svtrn1q_s64(__VA_ARGS__)
+#define svtrn1q_s16(...) __builtin_sve_svtrn1q_s16(__VA_ARGS__)
+#define svtrn2q_u8(...) __builtin_sve_svtrn2q_u8(__VA_ARGS__)
+#define svtrn2q_u32(...) __builtin_sve_svtrn2q_u32(__VA_ARGS__)
+#define svtrn2q_u64(...) __builtin_sve_svtrn2q_u64(__VA_ARGS__)
+#define svtrn2q_u16(...) __builtin_sve_svtrn2q_u16(__VA_ARGS__)
+#define svtrn2q_s8(...) __builtin_sve_svtrn2q_s8(__VA_ARGS__)
+#define svtrn2q_f64(...) __builtin_sve_svtrn2q_f64(__VA_ARGS__)
+#define svtrn2q_f32(...) __builtin_sve_svtrn2q_f32(__VA_ARGS__)
+#define svtrn2q_f16(...) __builtin_sve_svtrn2q_f16(__VA_ARGS__)
+#define svtrn2q_s32(...) __builtin_sve_svtrn2q_s32(__VA_ARGS__)
+#define svtrn2q_s64(...) __builtin_sve_svtrn2q_s64(__VA_ARGS__)
+#define svtrn2q_s16(...) __builtin_sve_svtrn2q_s16(__VA_ARGS__)
+#define svuzp1q_u8(...) __builtin_sve_svuzp1q_u8(__VA_ARGS__)
+#define svuzp1q_u32(...) __builtin_sve_svuzp1q_u32(__VA_ARGS__)
+#define svuzp1q_u64(...) __builtin_sve_svuzp1q_u64(__VA_ARGS__)
+#define svuzp1q_u16(...) __builtin_sve_svuzp1q_u16(__VA_ARGS__)
+#define svuzp1q_s8(...) __builtin_sve_svuzp1q_s8(__VA_ARGS__)
+#define svuzp1q_f64(...) __builtin_sve_svuzp1q_f64(__VA_ARGS__)
+#define svuzp1q_f32(...) __builtin_sve_svuzp1q_f32(__VA_ARGS__)
+#define svuzp1q_f16(...) __builtin_sve_svuzp1q_f16(__VA_ARGS__)
+#define svuzp1q_s32(...) __builtin_sve_svuzp1q_s32(__VA_ARGS__)
+#define svuzp1q_s64(...) __builtin_sve_svuzp1q_s64(__VA_ARGS__)
+#define svuzp1q_s16(...) __builtin_sve_svuzp1q_s16(__VA_ARGS__)
+#define svuzp2q_u8(...) __builtin_sve_svuzp2q_u8(__VA_ARGS__)
+#define svuzp2q_u32(...) __builtin_sve_svuzp2q_u32(__VA_ARGS__)
+#define svuzp2q_u64(...) __builtin_sve_svuzp2q_u64(__VA_ARGS__)
+#define svuzp2q_u16(...) __builtin_sve_svuzp2q_u16(__VA_ARGS__)
+#define svuzp2q_s8(...) __builtin_sve_svuzp2q_s8(__VA_ARGS__)
+#define svuzp2q_f64(...) __builtin_sve_svuzp2q_f64(__VA_ARGS__)
+#define svuzp2q_f32(...) __builtin_sve_svuzp2q_f32(__VA_ARGS__)
+#define svuzp2q_f16(...) __builtin_sve_svuzp2q_f16(__VA_ARGS__)
+#define svuzp2q_s32(...) __builtin_sve_svuzp2q_s32(__VA_ARGS__)
+#define svuzp2q_s64(...) __builtin_sve_svuzp2q_s64(__VA_ARGS__)
+#define svuzp2q_s16(...) __builtin_sve_svuzp2q_s16(__VA_ARGS__)
+#define svzip1q_u8(...) __builtin_sve_svzip1q_u8(__VA_ARGS__)
+#define svzip1q_u32(...) __builtin_sve_svzip1q_u32(__VA_ARGS__)
+#define svzip1q_u64(...) __builtin_sve_svzip1q_u64(__VA_ARGS__)
+#define svzip1q_u16(...) __builtin_sve_svzip1q_u16(__VA_ARGS__)
+#define svzip1q_s8(...) __builtin_sve_svzip1q_s8(__VA_ARGS__)
+#define svzip1q_f64(...) __builtin_sve_svzip1q_f64(__VA_ARGS__)
+#define svzip1q_f32(...) __builtin_sve_svzip1q_f32(__VA_ARGS__)
+#define svzip1q_f16(...) __builtin_sve_svzip1q_f16(__VA_ARGS__)
+#define svzip1q_s32(...) __builtin_sve_svzip1q_s32(__VA_ARGS__)
+#define svzip1q_s64(...) __builtin_sve_svzip1q_s64(__VA_ARGS__)
+#define svzip1q_s16(...) __builtin_sve_svzip1q_s16(__VA_ARGS__)
+#define svzip2q_u8(...) __builtin_sve_svzip2q_u8(__VA_ARGS__)
+#define svzip2q_u32(...) __builtin_sve_svzip2q_u32(__VA_ARGS__)
+#define svzip2q_u64(...) __builtin_sve_svzip2q_u64(__VA_ARGS__)
+#define svzip2q_u16(...) __builtin_sve_svzip2q_u16(__VA_ARGS__)
+#define svzip2q_s8(...) __builtin_sve_svzip2q_s8(__VA_ARGS__)
+#define svzip2q_f64(...) __builtin_sve_svzip2q_f64(__VA_ARGS__)
+#define svzip2q_f32(...) __builtin_sve_svzip2q_f32(__VA_ARGS__)
+#define svzip2q_f16(...) __builtin_sve_svzip2q_f16(__VA_ARGS__)
+#define svzip2q_s32(...) __builtin_sve_svzip2q_s32(__VA_ARGS__)
+#define svzip2q_s64(...) __builtin_sve_svzip2q_s64(__VA_ARGS__)
+#define svzip2q_s16(...) __builtin_sve_svzip2q_s16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8)))
+svuint8_t svld1ro(svbool_t, uint8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32)))
+svuint32_t svld1ro(svbool_t, uint32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64)))
+svuint64_t svld1ro(svbool_t, uint64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16)))
+svuint16_t svld1ro(svbool_t, uint16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8)))
+svint8_t svld1ro(svbool_t, int8_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64)))
+svfloat64_t svld1ro(svbool_t, float64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32)))
+svfloat32_t svld1ro(svbool_t, float32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16)))
+svfloat16_t svld1ro(svbool_t, float16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32)))
+svint32_t svld1ro(svbool_t, int32_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64)))
+svint64_t svld1ro(svbool_t, int64_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16)))
+svint16_t svld1ro(svbool_t, int16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64)))
+svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8)))
+svuint8_t svtrn1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32)))
+svuint32_t svtrn1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64)))
+svuint64_t svtrn1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16)))
+svuint16_t svtrn1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8)))
+svint8_t svtrn1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64)))
+svfloat64_t svtrn1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32)))
+svfloat32_t svtrn1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16)))
+svfloat16_t svtrn1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32)))
+svint32_t svtrn1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64)))
+svint64_t svtrn1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16)))
+svint16_t svtrn1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8)))
+svuint8_t svtrn2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32)))
+svuint32_t svtrn2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64)))
+svuint64_t svtrn2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16)))
+svuint16_t svtrn2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8)))
+svint8_t svtrn2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64)))
+svfloat64_t svtrn2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32)))
+svfloat32_t svtrn2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16)))
+svfloat16_t svtrn2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32)))
+svint32_t svtrn2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64)))
+svint64_t svtrn2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16)))
+svint16_t svtrn2q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8)))
+svuint8_t svuzp1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32)))
+svuint32_t svuzp1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64)))
+svuint64_t svuzp1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16)))
+svuint16_t svuzp1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8)))
+svint8_t svuzp1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64)))
+svfloat64_t svuzp1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32)))
+svfloat32_t svuzp1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16)))
+svfloat16_t svuzp1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32)))
+svint32_t svuzp1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64)))
+svint64_t svuzp1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16)))
+svint16_t svuzp1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8)))
+svuint8_t svuzp2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32)))
+svuint32_t svuzp2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64)))
+svuint64_t svuzp2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16)))
+svuint16_t svuzp2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8)))
+svint8_t svuzp2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64)))
+svfloat64_t svuzp2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32)))
+svfloat32_t svuzp2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16)))
+svfloat16_t svuzp2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32)))
+svint32_t svuzp2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64)))
+svint64_t svuzp2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16)))
+svint16_t svuzp2q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8)))
+svuint8_t svzip1q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32)))
+svuint32_t svzip1q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64)))
+svuint64_t svzip1q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16)))
+svuint16_t svzip1q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8)))
+svint8_t svzip1q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64)))
+svfloat64_t svzip1q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32)))
+svfloat32_t svzip1q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16)))
+svfloat16_t svzip1q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32)))
+svint32_t svzip1q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64)))
+svint64_t svzip1q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16)))
+svint16_t svzip1q(svint16_t, svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8)))
+svuint8_t svzip2q(svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32)))
+svuint32_t svzip2q(svuint32_t, svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64)))
+svuint64_t svzip2q(svuint64_t, svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16)))
+svuint16_t svzip2q(svuint16_t, svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8)))
+svint8_t svzip2q(svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64)))
+svfloat64_t svzip2q(svfloat64_t, svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32)))
+svfloat32_t svzip2q(svfloat32_t, svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16)))
+svfloat16_t svzip2q(svfloat16_t, svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32)))
+svint32_t svzip2q(svint32_t, svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64)))
+svint64_t svzip2q(svint64_t, svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16)))
+svint16_t svzip2q(svint16_t, svint16_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP64)
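+
+// Illustrative note (assumption): in this extension svld1ro loads a single
+// 256-bit block and replicates it across the whole vector, while the *q
+// permutes (svtrn1q/svtrn2q, svuzp1q/svuzp2q, svzip1q/svzip2q) interleave
+// entire 128-bit quadwords instead of individual elements; together they move
+// tiles into the layout consumed by the FP64 FMMLA form of svmmla.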
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
+#define svld1ro_bf16(...) __builtin_sve_svld1ro_bf16(__VA_ARGS__)
+#define svtrn1q_bf16(...) __builtin_sve_svtrn1q_bf16(__VA_ARGS__)
+#define svtrn2q_bf16(...) __builtin_sve_svtrn2q_bf16(__VA_ARGS__)
+#define svuzp1q_bf16(...) __builtin_sve_svuzp1q_bf16(__VA_ARGS__)
+#define svuzp2q_bf16(...) __builtin_sve_svuzp2q_bf16(__VA_ARGS__)
+#define svzip1q_bf16(...) __builtin_sve_svzip1q_bf16(__VA_ARGS__)
+#define svzip2q_bf16(...) __builtin_sve_svzip2q_bf16(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16)))
+svbfloat16_t svld1ro(svbool_t, bfloat16_t const *);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16)))
+svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16)))
+svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16)))
+svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16)))
+svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16)))
+svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16)))
+svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)
+
+#if defined(__ARM_FEATURE_SVE_MATMUL_INT8)
+#define svmmla_s32(...) __builtin_sve_svmmla_s32(__VA_ARGS__)
+#define svmmla_u32(...) __builtin_sve_svmmla_u32(__VA_ARGS__)
+#define svsudot_n_s32(...) __builtin_sve_svsudot_n_s32(__VA_ARGS__)
+#define svsudot_s32(...) __builtin_sve_svsudot_s32(__VA_ARGS__)
+#define svsudot_lane_s32(...) __builtin_sve_svsudot_lane_s32(__VA_ARGS__)
+#define svusdot_n_s32(...) __builtin_sve_svusdot_n_s32(__VA_ARGS__)
+#define svusdot_s32(...) __builtin_sve_svusdot_s32(__VA_ARGS__)
+#define svusdot_lane_s32(...) __builtin_sve_svusdot_lane_s32(__VA_ARGS__)
+#define svusmmla_s32(...) __builtin_sve_svusmmla_s32(__VA_ARGS__)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32)))
+svint32_t svmmla(svint32_t, svint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32)))
+svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32)))
+svint32_t svsudot(svint32_t, svint8_t, uint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32)))
+svint32_t svsudot(svint32_t, svint8_t, svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32)))
+svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, int8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32)))
+svint32_t svusdot(svint32_t, svuint8_t, svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32)))
+svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32)))
+svint32_t svusmmla(svint32_t, svuint8_t, svint8_t);
+#endif  //defined(__ARM_FEATURE_SVE_MATMUL_INT8)
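+
+// Illustrative sketch (not part of the generated header): svusdot and svsudot
+// are the mixed-signedness byte dot products; each 32-bit lane accumulates the
+// sum of four unsigned*signed (usdot) or signed*unsigned (sudot) products:
+//
+//   svint32_t acc_u8_s8(svint32_t acc, svuint8_t u, svint8_t s) {
+//     return svusdot(acc, u, s);  // acc[i] += sum_{k<4} u[4i+k] * s[4i+k]
+//   }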
+#if defined(__ARM_FEATURE_SVE_BF16)
+#define svcvtnt_bf16_x      svcvtnt_bf16_m
+#define svcvtnt_bf16_f32_x  svcvtnt_bf16_f32_m
+#endif /*__ARM_FEATURE_SVE_BF16 */
+
+#if defined(__ARM_FEATURE_SVE2)
+#define svcvtnt_f16_x      svcvtnt_f16_m
+#define svcvtnt_f16_f32_x  svcvtnt_f16_f32_m
+#define svcvtnt_f32_x      svcvtnt_f32_m
+#define svcvtnt_f32_f64_x  svcvtnt_f32_f64_m
+
+#define svcvtxnt_f32_x     svcvtxnt_f32_m
+#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m
+
+#endif /*__ARM_FEATURE_SVE2 */
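+
+// Illustrative note (assumption): the _x forms above can alias the _m forms
+// because the narrowing "top" conversions only write the odd-numbered
+// half-width elements, so the remaining elements are always taken from the
+// first operand and the merging behaviour already matches the don't-care (_x)
+// semantics.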
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /*__ARM_FEATURE_SVE */
+
+#endif /* __ARM_SVE_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/armintr.h b/linux-x86/lib64/clang/11.0.5/include/armintr.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/armintr.h
rename to linux-x86/lib64/clang/11.0.5/include/armintr.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx2intrin.h b/linux-x86/lib64/clang/11.0.5/include/avx2intrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.1/include/avx2intrin.h
copy to linux-x86/lib64/clang/11.0.5/include/avx2intrin.h
index 162e83e..cc16720 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx2intrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/avx2intrin.h
@@ -740,6 +740,8 @@
   return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
 }
 
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
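+// (Illustrative comment, not in the upstream header.) The _mm_ spelling is an
+// alias kept for compatibility with Intel's original documentation; both names
+// broadcast the 128-bit source into both halves of a 256-bit vector:
+//   __m256i v = _mm_broadcastsi128_si256(x);   // same as the _mm256_ form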
+
 #define _mm_blend_epi32(V1, V2, M) \
   (__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
                                      (__v4si)(__m128i)(V2), (int)(M))
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512bf16intrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512bf16intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512bitalgintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512bitalgintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512bwintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/11.0.1/include/avx512bwintrin.h
copy to linux-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
index 3765584..4281a33 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx512bwintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
@@ -1504,13 +1504,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi16(__m512i __A, int __B)
+_mm512_slli_epi16(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
 }
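+
+// (Illustrative comment, not in the upstream header.) This update changes the
+// shift-count parameter of the immediate shift intrinsics from int to
+// unsigned int, matching the Intel intrinsics guide; the underlying
+// instructions already define out-of-range counts (zero, or sign fill for the
+// arithmetic shifts), so behaviour is otherwise unchanged.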
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1518,7 +1519,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_slli_epi16(__A, __B),
@@ -1595,13 +1596,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi16(__m512i __A, int __B)
+_mm512_srai_epi16(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1609,7 +1611,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_srai_epi16(__A, __B),
@@ -1639,13 +1641,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi16(__m512i __A, int __B)
+_mm512_srli_epi16(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
                                          (__v32hi)_mm512_srli_epi16(__A, __B),
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512cdintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512cdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512cdintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512cdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512dqintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512dqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512dqintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512dqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512erintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512erintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512erintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/avx512fintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512fintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.1/include/avx512fintrin.h
copy to linux-x86/lib64/clang/11.0.5/include/avx512fintrin.h
index 7465da3..fa22ef3 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/avx512fintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/avx512fintrin.h
@@ -5111,13 +5111,14 @@
                                       (__v8di)_mm512_setzero_si512())
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, int __B)
+_mm512_slli_epi32(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_slli_epi32(__A, __B),
@@ -5125,20 +5126,20 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_slli_epi32(__A, __B),
                                          (__v16si)_mm512_setzero_si512());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, int __B)
+_mm512_slli_epi64(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_slli_epi64(__A, __B),
@@ -5146,7 +5147,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_slli_epi64(__A, __B),
@@ -5154,13 +5155,14 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, int __B)
+_mm512_srli_epi32(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srli_epi32(__A, __B),
@@ -5168,20 +5170,21 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srli_epi32(__A, __B),
                                          (__v16si)_mm512_setzero_si512());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, int __B)
+_mm512_srli_epi64(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srli_epi64(__A, __B),
@@ -5189,7 +5192,8 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A,
+                        unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srli_epi64(__A, __B),
@@ -6593,13 +6597,14 @@
                                              (int)(R))
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, int __B)
+_mm512_srai_epi32(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A,
+                       unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srai_epi32(__A, __B),
@@ -6607,20 +6612,21 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
+_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
+                        unsigned int __B) {
   return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
                                          (__v16si)_mm512_srai_epi32(__A, __B),
                                          (__v16si)_mm512_setzero_si512());
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, int __B)
+_mm512_srai_epi64(__m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srai_epi64(__A, __B),
@@ -6628,7 +6634,7 @@
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
+_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
 {
   return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                           (__v8di)_mm512_srai_epi64(__A, __B),
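The avx512fintrin.h hunk above, like the avx512vlbwintrin.h and avx512vlintrin.h hunks further down, changes the immediate shift-count parameter of the slli/srli/srai families from int to unsigned int. A minimal caller sketch under the new signatures (illustrative values; assumes a target compiled with -mavx512f):

    #include <immintrin.h>

    /* Shift each 32-bit lane left by 3 bits. The count parameter is now
       unsigned int, so a negative count is diagnosed at the call site
       rather than silently converted. */
    __m512i shift_example(__m512i v) {
      return _mm512_slli_epi32(v, 3u);
    }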
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512ifmaintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512ifmaintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512ifmavlintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512ifmavlintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512pfintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512pfintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512pfintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512pfintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vbmi2intrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vbmi2intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vbmiintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vbmiintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vbmivlintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vbmivlintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlbf16intrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlbf16intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlbitalgintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlbitalgintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlbwintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
similarity index 99%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlbwintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
index cd9f240..6ed10ed 100644
--- a/linux-x86/lib64/clang/11.0.1/include/avx512vlbwintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
@@ -1939,7 +1939,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_slli_epi16(__A, __B),
@@ -1947,7 +1947,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_slli_epi16(__A, __B),
@@ -1955,7 +1955,8 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+                       unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_slli_epi16(__A, __B),
@@ -1963,7 +1964,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_slli_epi16(__A, __B),
@@ -2091,7 +2092,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_srai_epi16(__A, __B),
@@ -2099,7 +2100,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
                                              (__v8hi)_mm_srai_epi16(__A, __B),
@@ -2107,7 +2108,8 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
+                       unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_srai_epi16(__A, __B),
@@ -2115,7 +2117,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
                                          (__v16hi)_mm256_srai_epi16(__A, __B),
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlcdintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlcdintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vldqintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vldqintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
similarity index 99%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
index 9d1d791..968c10e 100644
--- a/linux-x86/lib64/clang/11.0.1/include/avx512vlintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
@@ -4522,7 +4522,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_slli_epi32(__A, __B),
@@ -4530,7 +4530,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_slli_epi32(__A, __B),
@@ -4538,7 +4538,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_slli_epi32(__A, __B),
@@ -4546,7 +4546,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_slli_epi32(__A, __B),
@@ -4586,7 +4586,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_slli_epi64(__A, __B),
@@ -4594,7 +4594,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_slli_epi64(__A, __B),
@@ -4602,7 +4602,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_slli_epi64(__A, __B),
@@ -4610,7 +4610,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_slli_epi64(__A, __B),
@@ -4866,7 +4866,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srli_epi32(__A, __B),
@@ -4874,7 +4874,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srli_epi32(__A, __B),
@@ -4882,7 +4882,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srli_epi32(__A, __B),
@@ -4890,7 +4890,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srli_epi32(__A, __B),
@@ -4930,7 +4930,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_srli_epi64(__A, __B),
@@ -4938,7 +4938,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                              (__v2di)_mm_srli_epi64(__A, __B),
@@ -4946,7 +4946,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_srli_epi64(__A, __B),
@@ -4954,7 +4954,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                              (__v4di)_mm256_srli_epi64(__A, __B),
@@ -6405,7 +6405,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srai_epi32(__A, __B),
@@ -6413,7 +6413,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
 {
   return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                              (__v4si)_mm_srai_epi32(__A, __B),
@@ -6421,7 +6421,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srai_epi32(__A, __B),
@@ -6429,7 +6429,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
 {
   return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                              (__v8si)_mm256_srai_epi32(__A, __B),
@@ -6481,13 +6481,13 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srai_epi64(__m128i __A, int __imm)
+_mm_srai_epi64(__m128i __A, unsigned int __imm)
 {
   return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
+_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
                                            (__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6495,7 +6495,7 @@
 }
 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
 {
   return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \
                                            (__v2di)_mm_srai_epi64(__A, __imm), \
@@ -6503,13 +6503,14 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi64(__m256i __A, int __imm)
+_mm256_srai_epi64(__m256i __A, unsigned int __imm)
 {
   return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
+_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A,
+                       unsigned int __imm)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
                                         (__v4di)_mm256_srai_epi64(__A, __imm), \
@@ -6517,7 +6518,7 @@
 }
 
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
 {
   return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \
                                         (__v4di)_mm256_srai_epi64(__A, __imm), \
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlvbmi2intrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlvbmi2intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlvnniintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlvnniintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vlvp2intersectintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vlvp2intersectintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vnniintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vnniintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vp2intersectintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vp2intersectintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vpopcntdqintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vpopcntdqintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avx512vpopcntdqvlintrin.h b/linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avx512vpopcntdqvlintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/avxintrin.h b/linux-x86/lib64/clang/11.0.5/include/avxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/avxintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/avxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/bits/stdatomic.h b/linux-x86/lib64/clang/11.0.5/include/bits/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/bits/stdatomic.h
rename to linux-x86/lib64/clang/11.0.5/include/bits/stdatomic.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/bmi2intrin.h b/linux-x86/lib64/clang/11.0.5/include/bmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/bmi2intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/bmiintrin.h b/linux-x86/lib64/clang/11.0.5/include/bmiintrin.h
similarity index 86%
copy from darwin-x86/lib64/clang/11.0.1/include/bmiintrin.h
copy to linux-x86/lib64/clang/11.0.5/include/bmiintrin.h
index 841bd84..f583c21 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/bmiintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/bmiintrin.h
@@ -111,7 +111,8 @@
 
 #undef __RELAXED_FN_ATTRS
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__BMI__)
 
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
@@ -192,6 +193,28 @@
   return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
 }
 
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+///    An unsigned integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
+///    specify the index of the least significant bit. Bits [15:8] specify the
+///    number of bits to be extracted.
+/// \returns An unsigned integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u32
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bextr2_u32(unsigned int __X, unsigned int __Y) {
+  return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
 /// Clears all bits in the source except for the least significant bit
 ///    containing a value of 1 and returns the result.
 ///
@@ -321,6 +344,28 @@
   return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
 }
 
+/* Intel-specified, single-leading-underscore version of BEXTR2 */
+/// Extracts the specified bits from the first operand and returns them
+///    in the least significant bits of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose bits are to be extracted.
+/// \param __Y
+///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
+///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
+///    the number of bits to be extracted.
+/// \returns An unsigned 64-bit integer whose least significant bits contain the
+///    extracted bits.
+/// \see __bextr_u64
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bextr2_u64(unsigned long long __X, unsigned long long __Y) {
+  return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
 /// Clears all bits in the source except for the least significant bit
 ///    containing a value of 1 and returns the result.
 ///
@@ -376,6 +421,7 @@
 
 #undef __DEFAULT_FN_ATTRS
 
-#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules)   \
+          || defined(__BMI__) */
 
 #endif /* __BMIINTRIN_H */
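For reference, a sketch of the _bextr2_u32/_bextr2_u64 intrinsics added above. Unlike the three-operand _bextr_u32, which packs the start and length itself, these take the BEXTR control word directly: start bit in bits [7:0], field length in bits [15:8]. The field layout below is illustrative and assumes -mbmi:

    #include <x86intrin.h>

    unsigned int extract_field(unsigned int x) {
      /* Extract an 8-bit field starting at bit 4:
         control word = (length << 8) | start. */
      return _bextr2_u32(x, (8u << 8) | 4u);
    }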
diff --git a/linux-x86/lib64/clang/11.0.5/include/cet.h b/linux-x86/lib64/clang/11.0.5/include/cet.h
new file mode 100644
index 0000000..ffb19de
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/cet.h
@@ -0,0 +1,66 @@
+/*===------ cet.h - Control-flow Enforcement Technology feature ------------===
+ * Adds the x86 IBT and/or SHSTK feature bits to the ELF program property if
+ * they are enabled. Otherwise, the contents of this header file are unused.
+ * This file is mainly designed for assembly source code that wants to enable
+ * CET.
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CET_H
+#define __CET_H
+
+#ifdef __ASSEMBLER__
+
+#ifndef __CET__
+# define _CET_ENDBR
+#endif
+
+#ifdef __CET__
+
+# ifdef __LP64__
+#  if __CET__ & 0x1
+#    define _CET_ENDBR endbr64
+#  else
+#    define _CET_ENDBR
+#  endif
+# else
+#  if __CET__ & 0x1
+#    define _CET_ENDBR endbr32
+#  else
+#    define _CET_ENDBR
+#  endif
+# endif
+
+
+#  ifdef __LP64__
+#   define __PROPERTY_ALIGN 3
+#  else
+#   define __PROPERTY_ALIGN 2
+#  endif
+
+	.pushsection ".note.gnu.property", "a"
+	.p2align __PROPERTY_ALIGN
+	.long 1f - 0f		/* name length.  */
+	.long 4f - 1f		/* data length.  */
+	/* NT_GNU_PROPERTY_TYPE_0.   */
+	.long 5			/* note type.  */
+0:
+	.asciz "GNU"		/* vendor name.  */
+1:
+	.p2align __PROPERTY_ALIGN
+	/* GNU_PROPERTY_X86_FEATURE_1_AND.  */
+	.long 0xc0000002	/* pr_type.  */
+	.long 3f - 2f		/* pr_datasz.  */
+2:
+	/* GNU_PROPERTY_X86_FEATURE_1_XXX.  */
+	.long __CET__
+3:
+	.p2align __PROPERTY_ALIGN
+4:
+	.popsection
+#endif
+#endif
+#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/cetintrin.h b/linux-x86/lib64/clang/11.0.5/include/cetintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/cetintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/cetintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/cldemoteintrin.h b/linux-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
similarity index 73%
rename from linux-x86/lib64/clang/11.0.1/include/cldemoteintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
index 2413e7d..cfb951c 100644
--- a/linux-x86/lib64/clang/11.0.1/include/cldemoteintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
@@ -18,11 +18,19 @@
 #define __DEFAULT_FN_ATTRS \
   __attribute__((__always_inline__, __nodebug__,  __target__("cldemote")))
 
+/// Hint to hardware that the cache line that contains \p __P should be demoted
+/// from the cache closest to the processor core to a level more distant from
+/// the processor core.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CLDEMOTE </c> instruction.
 static __inline__ void __DEFAULT_FN_ATTRS
 _cldemote(const void * __P) {
   __builtin_ia32_cldemote(__P);
 }
 
+#define _mm_cldemote(p) _cldemote(p)
 #undef __DEFAULT_FN_ATTRS
 
 #endif
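A minimal sketch of the _cldemote hint documented above (the buffer is illustrative; assumes -mcldemote). The hunk also adds _mm_cldemote as an alias:

    #include <x86intrin.h>

    void publish_line(const char *buf) {
      /* After writing shared data, hint that this cache line may be
         demoted to a more distant cache level, e.g. ahead of a reader
         on another core. This is purely a performance hint. */
      _cldemote(buf);
    }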
diff --git a/linux-x86/lib64/clang/11.0.1/include/clflushoptintrin.h b/linux-x86/lib64/clang/11.0.5/include/clflushoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/clflushoptintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/clflushoptintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/clwbintrin.h b/linux-x86/lib64/clang/11.0.5/include/clwbintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/clwbintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/clwbintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/clzerointrin.h b/linux-x86/lib64/clang/11.0.5/include/clzerointrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/clzerointrin.h
rename to linux-x86/lib64/clang/11.0.5/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/cpuid.h b/linux-x86/lib64/clang/11.0.5/include/cpuid.h
similarity index 96%
copy from darwin-x86/lib64/clang/11.0.1/include/cpuid.h
copy to linux-x86/lib64/clang/11.0.5/include/cpuid.h
index 4ddd648..2a88c04 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/cpuid.h
+++ b/linux-x86/lib64/clang/11.0.5/include/cpuid.h
@@ -24,6 +24,10 @@
 #define signature_CYRIX_ebx 0x69727943
 #define signature_CYRIX_edx 0x736e4978
 #define signature_CYRIX_ecx 0x64616574
+/* HYGON:   "HygonGenuine" */
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
 /* INTEL:   "GenuineIntel" */
 #define signature_INTEL_ebx 0x756e6547
 #define signature_INTEL_edx 0x49656e69
@@ -182,8 +186,13 @@
 /* Features in %edx for leaf 7 sub-leaf 0 */
 #define bit_AVX5124VNNIW  0x00000004
 #define bit_AVX5124FMAPS  0x00000008
+#define bit_SERIALIZE     0x00004000
+#define bit_TSXLDTRK      0x00010000
 #define bit_PCONFIG       0x00040000
 #define bit_IBT           0x00100000
+#define bit_AMXBF16       0x00400000
+#define bit_AMXTILE       0x01000000
+#define bit_AMXINT8       0x02000000
 
 /* Features in %eax for leaf 7 sub-leaf 1 */
 #define bit_AVX512BF16    0x00000020
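A sketch of testing one of the leaf-7 feature bits added above, using the __get_cpuid_count helper that this header already provides:

    #include <cpuid.h>

    int have_serialize(void) {
      unsigned int eax, ebx, ecx, edx;
      /* Leaf 7, sub-leaf 0; SERIALIZE is reported in %edx. */
      if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
        return 0;
      return (edx & bit_SERIALIZE) != 0;
    }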
diff --git a/linux-x86/lib64/clang/11.0.1/include/cuda_wrappers/algorithm b/linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/cuda_wrappers/algorithm
rename to linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm
diff --git a/linux-x86/lib64/clang/11.0.1/include/cuda_wrappers/complex b/linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/cuda_wrappers/complex
rename to linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex
diff --git a/linux-x86/lib64/clang/11.0.1/include/cuda_wrappers/new b/linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/cuda_wrappers/new
rename to linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/11.0.1/include/emmintrin.h b/linux-x86/lib64/clang/11.0.5/include/emmintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.1/include/emmintrin.h
copy to linux-x86/lib64/clang/11.0.5/include/emmintrin.h
index 993c688..73a777b 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/emmintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/emmintrin.h
@@ -4970,10 +4970,10 @@
 
 #define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
 
-#define _MM_DENORMALS_ZERO_ON   (0x0040)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000)
+#define _MM_DENORMALS_ZERO_ON   (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF  (0x0000U)
 
-#define _MM_DENORMALS_ZERO_MASK (0x0040)
+#define _MM_DENORMALS_ZERO_MASK (0x0040U)
 
 #define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
 #define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
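The U suffixes above keep the denormals-are-zero masks unsigned, matching the unsigned MXCSR value returned by _mm_getcsr. Usage is unchanged, for example:

    #include <emmintrin.h>

    void enable_daz(void) {
      /* Treat denormal source operands as zero in subsequent SSE math. */
      _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
    }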
diff --git a/linux-x86/lib64/clang/11.0.1/include/enqcmdintrin.h b/linux-x86/lib64/clang/11.0.5/include/enqcmdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/enqcmdintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/enqcmdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/f16cintrin.h b/linux-x86/lib64/clang/11.0.5/include/f16cintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/f16cintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/f16cintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/float.h b/linux-x86/lib64/clang/11.0.5/include/float.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/float.h
rename to linux-x86/lib64/clang/11.0.5/include/float.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/fma4intrin.h b/linux-x86/lib64/clang/11.0.5/include/fma4intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/fma4intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/fma4intrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/fmaintrin.h b/linux-x86/lib64/clang/11.0.5/include/fmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/fmaintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/fmaintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h
new file mode 100644
index 0000000..83bcd01
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h
@@ -0,0 +1,387 @@
+//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// A single-header library providing a utility class to break up an array of
+// bytes. Whenever run on the same input, it provides the same output, as long
+// as its methods are called in the same order, with the same arguments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// In addition to the comments below, the API is also briefly documented at
+// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
+class FuzzedDataProvider {
+ public:
+  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
+  // provide more granular access. |data| must outlive the FuzzedDataProvider.
+  FuzzedDataProvider(const uint8_t *data, size_t size)
+      : data_ptr_(data), remaining_bytes_(size) {}
+  ~FuzzedDataProvider() = default;
+
+  // See the implementation below (after the class definition) for more verbose
+  // comments for each of the methods.
+
+  // Methods returning std::vector of bytes. These are the most popular choice
+  // when splitting fuzzing input into pieces, as every piece is put into a
+  // separate buffer (i.e. ASan would catch any under-/overflow) and the memory
+  // will be released automatically.
+  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes);
+  template <typename T>
+  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes, T terminator = 0);
+  template <typename T> std::vector<T> ConsumeRemainingBytes();
+
+  // Methods returning strings. Use only when you need a std::string or a null
+  // terminated C-string. Otherwise, prefer the methods returning std::vector.
+  std::string ConsumeBytesAsString(size_t num_bytes);
+  std::string ConsumeRandomLengthString(size_t max_length);
+  std::string ConsumeRandomLengthString();
+  std::string ConsumeRemainingBytesAsString();
+
+  // Methods returning integer values.
+  template <typename T> T ConsumeIntegral();
+  template <typename T> T ConsumeIntegralInRange(T min, T max);
+
+  // Methods returning floating point values.
+  template <typename T> T ConsumeFloatingPoint();
+  template <typename T> T ConsumeFloatingPointInRange(T min, T max);
+
+  // 0 <= return value <= 1.
+  template <typename T> T ConsumeProbability();
+
+  bool ConsumeBool();
+
+  // Returns a value chosen from the given enum.
+  template <typename T> T ConsumeEnum();
+
+  // Returns a value from the given array.
+  template <typename T, size_t size> T PickValueInArray(const T (&array)[size]);
+  template <typename T> T PickValueInArray(std::initializer_list<const T> list);
+
+  // Writes data to the given destination and returns number of bytes written.
+  size_t ConsumeData(void *destination, size_t num_bytes);
+
+  // Reports the remaining bytes available for fuzzed input.
+  size_t remaining_bytes() { return remaining_bytes_; }
+
+ private:
+  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
+  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
+
+  void CopyAndAdvance(void *destination, size_t num_bytes);
+
+  void Advance(size_t num_bytes);
+
+  template <typename T>
+  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes);
+
+  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value);
+
+  const uint8_t *data_ptr_;
+  size_t remaining_bytes_;
+};
+
+// Returns a std::vector containing |num_bytes| of input data. If fewer than
+// |num_bytes| of data remain, returns a shorter std::vector containing all
+// of the data that's left. Can be used with any byte-sized type, such as
+// char, unsigned char, uint8_t, etc.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  return ConsumeBytes<T>(num_bytes, num_bytes);
+}
+
+// Similar to |ConsumeBytes|, but also appends the terminator value at the end
+// of the resulting vector. Useful when a mutable null-terminated C-string is
+// needed, for example, but that is a rare case. Better to avoid it if possible
+// and prefer using |ConsumeBytes| or |ConsumeBytesAsString| methods.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytesWithTerminator(size_t num_bytes,
+                                                              T terminator) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
+  result.back() = terminator;
+  return result;
+}
+
+// Returns a std::vector containing all remaining bytes of the input data.
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeRemainingBytes() {
+  return ConsumeBytes<T>(remaining_bytes_);
+}
+
+// Returns a std::string containing |num_bytes| of input data. Using this and
+// |.c_str()| on the resulting string is the best way to get an immutable
+// null-terminated C string. If fewer than |num_bytes| of data remain, returns
+// a shorter std::string containing all of the data that's left.
+inline std::string FuzzedDataProvider::ConsumeBytesAsString(size_t num_bytes) {
+  static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
+                "ConsumeBytesAsString cannot convert the data to a string.");
+
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  std::string result(
+      reinterpret_cast<const std::string::value_type *>(data_ptr_), num_bytes);
+  Advance(num_bytes);
+  return result;
+}
+
+// Returns a std::string of length from 0 to |max_length|. When it runs out of
+// input data, returns what remains of the input. Designed to be more stable
+// with respect to a fuzzer inserting characters than just picking a random
+// length and then consuming that many bytes with |ConsumeBytes|.
+inline std::string
+FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
+  // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
+  // followed by anything else to the end of the string. As a result of this
+  // logic, a fuzzer can insert characters into the string, and the string
+  // will be lengthened to include those new characters, resulting in a more
+  // stable fuzzer than picking the length of a string independently from
+  // picking its contents.
+  std::string result;
+
+  // Reserve the anticipated capacity to prevent several reallocations.
+  result.reserve(std::min(max_length, remaining_bytes_));
+  for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
+    char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+    Advance(1);
+    if (next == '\\' && remaining_bytes_ != 0) {
+      next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+      Advance(1);
+      if (next != '\\')
+        break;
+    }
+    result += next;
+  }
+
+  result.shrink_to_fit();
+  return result;
+}
+
+// Returns a std::string of length from 0 to |remaining_bytes_|.
+inline std::string FuzzedDataProvider::ConsumeRandomLengthString() {
+  return ConsumeRandomLengthString(remaining_bytes_);
+}
+
+// Returns a std::string containing all remaining bytes of the input data.
+// Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
+// object.
+inline std::string FuzzedDataProvider::ConsumeRemainingBytesAsString() {
+  return ConsumeBytesAsString(remaining_bytes_);
+}
+
+// Returns a number in the range [Type's min, Type's max]. The value might
+// not be uniformly distributed in the given range. If there's no input data
+// left, always returns the type's minimum value.
+template <typename T> T FuzzedDataProvider::ConsumeIntegral() {
+  return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
+                                std::numeric_limits<T>::max());
+}
+
+// Returns a number in the range [min, max] by consuming bytes from the
+// input data. The value might not be uniformly distributed in the given
+// range. If there's no input data left, always returns |min|. |min| must
+// be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeIntegralInRange(T min, T max) {
+  static_assert(std::is_integral<T>::value, "An integral type is required.");
+  static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
+
+  if (min > max)
+    abort();
+
+  // Use the biggest type possible to hold the range and the result.
+  uint64_t range = static_cast<uint64_t>(max) - min;
+  uint64_t result = 0;
+  size_t offset = 0;
+
+  while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
+         remaining_bytes_ != 0) {
+    // Pull bytes off the end of the seed data. Experimentally, this seems to
+    // allow the fuzzer to more easily explore the input space. This makes
+    // sense, since it works by modifying inputs that caused new code to run,
+    // and this data is often used to encode the length of data read by
+    // |ConsumeBytes|. Separating out read lengths makes it easier to modify
+    // the contents of the data that is actually read.
+    --remaining_bytes_;
+    result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
+    offset += CHAR_BIT;
+  }
+
+  // Avoid division by 0, in case |range + 1| results in overflow.
+  if (range != std::numeric_limits<decltype(range)>::max())
+    result = result % (range + 1);
+
+  return static_cast<T>(min + result);
+}
+
+// Returns a floating point value in the range [Type's lowest, Type's max] by
+// consuming bytes from the input data. If there's no input data left, always
+// returns approximately 0.
+template <typename T> T FuzzedDataProvider::ConsumeFloatingPoint() {
+  return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
+                                        std::numeric_limits<T>::max());
+}
+
+// Returns a floating point value in the given range by consuming bytes from
+// the input data. If there's no input data left, returns |min|. Note that
+// |min| must be less than or equal to |max|.
+template <typename T>
+T FuzzedDataProvider::ConsumeFloatingPointInRange(T min, T max) {
+  if (min > max)
+    abort();
+
+  T range = .0;
+  T result = min;
+  constexpr T zero(.0);
+  if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
+    // The diff |max - min| would overflow the given floating point type. Use
+    // half of the diff as the range and consume a bool to decide whether the
+    // result is in the first or the second half of the diff.
+    range = (max / 2.0) - (min / 2.0);
+    if (ConsumeBool()) {
+      result += range;
+    }
+  } else {
+    range = max - min;
+  }
+
+  return result + range * ConsumeProbability<T>();
+}
+
+// Returns a floating point number in the range [0.0, 1.0]. If there's no
+// input data left, always returns 0.
+template <typename T> T FuzzedDataProvider::ConsumeProbability() {
+  static_assert(std::is_floating_point<T>::value,
+                "A floating point type is required.");
+
+  // Use different integral types for different floating point types in order
+  // to provide better density of the resulting values.
+  using IntegralType =
+      typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
+                                uint64_t>::type;
+
+  T result = static_cast<T>(ConsumeIntegral<IntegralType>());
+  result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
+  return result;
+}
+
+// Reads one byte and returns a bool, or false when no data remains.
+inline bool FuzzedDataProvider::ConsumeBool() {
+  return 1 & ConsumeIntegral<uint8_t>();
+}
+
+// Returns an enum value. The enum must start at 0 and be contiguous. It must
+// also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
+// enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
+template <typename T> T FuzzedDataProvider::ConsumeEnum() {
+  static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
+  return static_cast<T>(
+      ConsumeIntegralInRange<uint32_t>(0, static_cast<uint32_t>(T::kMaxValue)));
+}
+
+// Returns a copy of the value selected from the given fixed-size |array|.
+template <typename T, size_t size>
+T FuzzedDataProvider::PickValueInArray(const T (&array)[size]) {
+  static_assert(size > 0, "The array must be non-empty.");
+  return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+}
+
+template <typename T>
+T FuzzedDataProvider::PickValueInArray(std::initializer_list<const T> list) {
+  // TODO(Dor1s): switch to static_assert once C++14 is allowed.
+  if (!list.size())
+    abort();
+
+  return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
+}
+
+// Writes |num_bytes| of input data to the given destination pointer. If there
+// is not enough data left, writes all remaining bytes. Return value is the
+// number of bytes written.
+// In general, it's better to avoid using this function, but it may be useful
+// in cases when it's necessary to fill a certain buffer or object with
+// fuzzing data.
+inline size_t FuzzedDataProvider::ConsumeData(void *destination,
+                                              size_t num_bytes) {
+  num_bytes = std::min(num_bytes, remaining_bytes_);
+  CopyAndAdvance(destination, num_bytes);
+  return num_bytes;
+}
+
+// Private methods.
+inline void FuzzedDataProvider::CopyAndAdvance(void *destination,
+                                               size_t num_bytes) {
+  std::memcpy(destination, data_ptr_, num_bytes);
+  Advance(num_bytes);
+}
+
+inline void FuzzedDataProvider::Advance(size_t num_bytes) {
+  if (num_bytes > remaining_bytes_)
+    abort();
+
+  data_ptr_ += num_bytes;
+  remaining_bytes_ -= num_bytes;
+}
+
+template <typename T>
+std::vector<T> FuzzedDataProvider::ConsumeBytes(size_t size, size_t num_bytes) {
+  static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
+
+  // The point of using the size-based constructor below is to increase the
+  // odds of having a vector object with capacity being equal to the length.
+  // That part is always implementation specific, but at least both libc++ and
+  // libstdc++ allocate the requested number of bytes in that constructor,
+  // which seems to be a natural choice for other implementations as well.
+  // To increase the odds even more, we also call |shrink_to_fit| below.
+  std::vector<T> result(size);
+  if (size == 0) {
+    if (num_bytes != 0)
+      abort();
+    return result;
+  }
+
+  CopyAndAdvance(result.data(), num_bytes);
+
+  // Even though |shrink_to_fit| is also implementation specific, we expect it
+  // to provide an additional assurance in case vector's constructor allocated
+  // a buffer which is larger than the actual amount of data we put inside it.
+  result.shrink_to_fit();
+  return result;
+}
+
+template <typename TS, typename TU>
+TS FuzzedDataProvider::ConvertUnsignedToSigned(TU value) {
+  static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
+  static_assert(!std::numeric_limits<TU>::is_signed,
+                "Source type must be unsigned.");
+
+  // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
+  if (std::numeric_limits<TS>::is_modulo)
+    return static_cast<TS>(value);
+
+  // Avoid using implementation-defined unsigned to signed conversions.
+  // To learn more, see https://stackoverflow.com/questions/13150449.
+  if (value <= std::numeric_limits<TS>::max()) {
+    return static_cast<TS>(value);
+  } else {
+    constexpr auto TS_min = std::numeric_limits<TS>::min();
+    return TS_min + static_cast<TS>(value - TS_min);
+  }
+}
+
+#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
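A sketch of a libFuzzer target built on the header above; parse_document and the bounds are hypothetical stand-ins:

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>
    #include <fuzzer/FuzzedDataProvider.h>

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
      FuzzedDataProvider provider(data, size);
      // Carve the raw bytes into typed values; the same input always
      // yields the same sequence of values.
      int depth = provider.ConsumeIntegralInRange<int>(0, 16);
      bool strict = provider.ConsumeBool();
      std::string name = provider.ConsumeRandomLengthString(64);
      std::vector<uint8_t> payload = provider.ConsumeRemainingBytes<uint8_t>();
      // parse_document(name, payload, depth, strict);  /* hypothetical */
      (void)depth; (void)strict; (void)name; (void)payload;
      return 0;
    }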
diff --git a/linux-x86/lib64/clang/11.0.1/include/fxsrintrin.h b/linux-x86/lib64/clang/11.0.5/include/fxsrintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/fxsrintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/fxsrintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/gfniintrin.h b/linux-x86/lib64/clang/11.0.5/include/gfniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/gfniintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/gfniintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/htmintrin.h b/linux-x86/lib64/clang/11.0.5/include/htmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/htmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/htmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/htmxlintrin.h b/linux-x86/lib64/clang/11.0.5/include/htmxlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/htmxlintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/htmxlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ia32intrin.h b/linux-x86/lib64/clang/11.0.5/include/ia32intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ia32intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ia32intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/immintrin.h b/linux-x86/lib64/clang/11.0.5/include/immintrin.h
similarity index 61%
copy from darwin-x86/lib64/clang/11.0.1/include/immintrin.h
copy to linux-x86/lib64/clang/11.0.5/include/immintrin.h
index edf8c42..e9dff23 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/immintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/immintrin.h
@@ -10,198 +10,231 @@
 #ifndef __IMMINTRIN_H
 #define __IMMINTRIN_H
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MMX__)
 #include <mmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE__)
 #include <xmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE2__)
 #include <emmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE3__)
 #include <pmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSSE3__)
 #include <tmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__SSE4_2__) || defined(__SSE4_1__))
 #include <smmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AES__) || defined(__PCLMUL__))
 #include <wmmintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLFLUSHOPT__)
 #include <clflushoptintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLWB__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLWB__)
 #include <clwbintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX__)
 #include <avxintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX2__)
 #include <avx2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__F16C__)
 #include <f16cintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__VPCLMULQDQ__)
 #include <vpclmulqdqintrin.h>
 #endif
 
 /* No feature check desired due to internal checks */
 #include <bmiintrin.h>
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__BMI2__)
 #include <bmi2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__LZCNT__)
 #include <lzcntintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__POPCNT__)
 #include <popcntintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FMA__)
 #include <fmaintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512F__)
 #include <avx512fintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VL__)
 #include <avx512vlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512BW__)
 #include <avx512bwintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BITALG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512BITALG__)
 #include <avx512bitalgintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512CD__)
 #include <avx512cdintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VPOPCNTDQ__)
 #include <avx512vpopcntdqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
 #include <avx512vpopcntdqvlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VNNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VNNI__)
 #include <avx512vnniintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512VNNI__))
 #include <avx512vlvnniintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512DQ__)
 #include <avx512dqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512BITALG__))
 #include <avx512vlbitalgintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512BW__))
 #include <avx512vlbwintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512CD__))
 #include <avx512vlcdintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512DQ__))
 #include <avx512vldqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512ER__)
 #include <avx512erintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512IFMA__)
 #include <avx512ifmaintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512IFMA__) && defined(__AVX512VL__))
 #include <avx512ifmavlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VBMI__)
 #include <avx512vbmiintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VBMI__) && defined(__AVX512VL__))
 #include <avx512vbmivlintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VBMI2__)
 #include <avx512vbmi2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
 #include <avx512vlvbmi2intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512PF__)
 #include <avx512pfintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512BF16__)
 #include <avx512bf16intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     (defined(__AVX512VL__) && defined(__AVX512BF16__))
 #include <avx512vlbf16intrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PKU__)
 #include <pkuintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VAES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__VAES__)
 #include <vaesintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__GFNI__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__GFNI__)
 #include <gfniintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDPID__)
 /// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
 ///
 /// \headerfile <immintrin.h>
@@ -213,7 +246,8 @@
 }
 #endif // __RDPID__
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDRND__)
 static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
 _rdrand16_step(unsigned short *__p)
 {
@@ -235,7 +269,8 @@
 #endif
 #endif /* __RDRND__ */
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FSGSBASE__)
 #ifdef __x86_64__
 static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
 _readfsbase_u32(void)
@@ -288,7 +323,8 @@
 #endif
 #endif /* __FSGSBASE__ */
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MOVBE__)
 
 /* The structs used below are to force the load/store to be unaligned. This
  * is accomplished with the __packed__ attribute. The __may_alias__ prevents
@@ -347,35 +383,42 @@
 #endif
 #endif /* __MOVBE */
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RTM__)
 #include <rtmintrin.h>
 #include <xtestintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SHA__)
 #include <shaintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FXSR__)
 #include <fxsrintrin.h>
 #endif
 
 /* No feature check desired due to internal MSC_VER checks */
 #include <xsaveintrin.h>
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XSAVEOPT__)
 #include <xsaveoptintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XSAVEC__)
 #include <xsavecintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XSAVES__)
 #include <xsavesintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHSTK__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SHSTK__)
 #include <cetintrin.h>
 #endif
 
@@ -383,57 +426,81 @@
  * whereas others are also available at all times. */
 #include <adxintrin.h>
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__RDSEED__)
 #include <rdseedintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__WBNOINVD__)
 #include <wbnoinvdintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLDEMOTE__)
 #include <cldemoteintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__WAITPKG__)
 #include <waitpkgintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MOVDIRI__) || defined(__MOVDIR64B__)
 #include <movdirintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PCONFIG__)
 #include <pconfigintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SGX__)
 #include <sgxintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PTWRITE__)
 #include <ptwriteintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__INVPCID__)
 #include <invpcidintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__AVX512VP2INTERSECT__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+#include <amxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__AVX512VP2INTERSECT__)
 #include <avx512vp2intersectintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
 #include <avx512vlvp2intersectintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__ENQCMD__)
 #include <enqcmdintrin.h>
 #endif
 
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SERIALIZE__)
+#include <serializeintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__TSXLDTRK__)
+#include <tsxldtrkintrin.h>
+#endif
+
 #if defined(_MSC_VER) && __has_extension(gnu_asm)
 /* Define the default attributes for these intrinsics */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
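The immintrin.h hunks above are one mechanical rewrite plus a few additions: every feature gate changes from !defined(_MSC_VER) to !(defined(_MSC_VER) || defined(__SCE__)), so PlayStation (__SCE__) targets now get the MSVC behavior of including a sub-header only when its feature macro is defined (or under Clang modules, where the whole set must be visible), and new gates appear for the LLVM 11 headers amxintrin.h, serializeintrin.h, and tsxldtrkintrin.h. On the consumer side the same feature macros gate what you may call; a minimal sketch, assuming an x86 target compiled with clang -mavx2:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
    #if defined(__AVX2__)                            /* set by -mavx2 */
        __m256i a = _mm256_set1_epi32(2);
        __m256i b = _mm256_set1_epi32(3);
        __m256i c = _mm256_add_epi32(a, b);          /* lane-wise 32-bit add */
        int out[8];
        _mm256_storeu_si256((__m256i *)out, c);      /* unaligned store */
        printf("%d\n", out[0]);                      /* prints 5 */
    #else
        puts("AVX2 not enabled at compile time");
    #endif
        return 0;
    }
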
diff --git a/darwin-x86/lib64/clang/11.0.1/include/intrin.h b/linux-x86/lib64/clang/11.0.5/include/intrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.1/include/intrin.h
copy to linux-x86/lib64/clang/11.0.5/include/intrin.h
index f85f7a2..871b47c 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/intrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/intrin.h
@@ -289,6 +289,9 @@
 static __inline__
 unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
 
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 static __inline__
 __int64 _InterlockedDecrement64(__int64 volatile *_Addend);
 static __inline__
diff --git a/linux-x86/lib64/clang/11.0.1/include/inttypes.h b/linux-x86/lib64/clang/11.0.5/include/inttypes.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/inttypes.h
rename to linux-x86/lib64/clang/11.0.5/include/inttypes.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/invpcidintrin.h b/linux-x86/lib64/clang/11.0.5/include/invpcidintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/invpcidintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/invpcidintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/iso646.h b/linux-x86/lib64/clang/11.0.5/include/iso646.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/iso646.h
rename to linux-x86/lib64/clang/11.0.5/include/iso646.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/limits.h b/linux-x86/lib64/clang/11.0.5/include/limits.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/limits.h
rename to linux-x86/lib64/clang/11.0.5/include/limits.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/lwpintrin.h b/linux-x86/lib64/clang/11.0.5/include/lwpintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/lwpintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/lwpintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/lzcntintrin.h b/linux-x86/lib64/clang/11.0.5/include/lzcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/lzcntintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/lzcntintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/mm3dnow.h b/linux-x86/lib64/clang/11.0.5/include/mm3dnow.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/mm3dnow.h
rename to linux-x86/lib64/clang/11.0.5/include/mm3dnow.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/mm_malloc.h b/linux-x86/lib64/clang/11.0.5/include/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/mm_malloc.h
rename to linux-x86/lib64/clang/11.0.5/include/mm_malloc.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/mmintrin.h b/linux-x86/lib64/clang/11.0.5/include/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/mmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/mmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/module.modulemap b/linux-x86/lib64/clang/11.0.5/include/module.modulemap
similarity index 97%
rename from linux-x86/lib64/clang/11.0.1/include/module.modulemap
rename to linux-x86/lib64/clang/11.0.5/include/module.modulemap
index 7954a77..6894672 100644
--- a/linux-x86/lib64/clang/11.0.1/include/module.modulemap
+++ b/linux-x86/lib64/clang/11.0.5/include/module.modulemap
@@ -27,6 +27,12 @@
       header "arm_fp16.h"
       export *
     }
+
+    explicit module sve {
+      requires sve
+      header "arm_sve.h"
+      export *
+    }
   }
 
   explicit module intel {
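The module.modulemap change wraps arm_sve.h in an explicit submodule with "requires sve", so modules builds only compile it when the target supports SVE. In ordinary (non-modules) compiles the equivalent gate is the feature macro; a minimal sketch, assuming clang targeting AArch64 with -march=armv8-a+sve (svwhilelt predicates the tail iteration, so no scalar epilogue is needed):

    #if defined(__ARM_FEATURE_SVE)
    #include <arm_sve.h>

    /* sum a float array with scalable vectors */
    float sum_f32(const float *x, int n) {
        svfloat32_t acc = svdup_f32(0.0f);
        for (int i = 0; i < n; i += svcntw()) {      /* svcntw: 32-bit lanes per vector */
            svbool_t pg = svwhilelt_b32(i, n);       /* predicate covering valid lanes */
            acc = svadd_f32_m(pg, acc, svld1_f32(pg, x + i));
        }
        return svaddv_f32(svptrue_b32(), acc);       /* horizontal reduction */
    }
    #endif
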
diff --git a/linux-x86/lib64/clang/11.0.1/include/movdirintrin.h b/linux-x86/lib64/clang/11.0.5/include/movdirintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/movdirintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/movdirintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/msa.h b/linux-x86/lib64/clang/11.0.5/include/msa.h
similarity index 99%
rename from linux-x86/lib64/clang/11.0.1/include/msa.h
rename to linux-x86/lib64/clang/11.0.5/include/msa.h
index 19ea607..0ca4900 100644
--- a/linux-x86/lib64/clang/11.0.1/include/msa.h
+++ b/linux-x86/lib64/clang/11.0.5/include/msa.h
@@ -212,10 +212,14 @@
 #define __msa_ld_h __builtin_msa_ld_h
 #define __msa_ld_w __builtin_msa_ld_w
 #define __msa_ld_d __builtin_msa_ld_d
+#define __msa_ldr_d __builtin_msa_ldr_d
+#define __msa_ldr_w __builtin_msa_ldrq_w
 #define __msa_st_b __builtin_msa_st_b
 #define __msa_st_h __builtin_msa_st_h
 #define __msa_st_w __builtin_msa_st_w
 #define __msa_st_d __builtin_msa_st_d
+#define __msa_str_d __builtin_msa_str_d
+#define __msa_str_w __builtin_msa_strq_w
 #define __msa_sat_s_b __builtin_msa_sat_s_b
 #define __msa_sat_s_h __builtin_msa_sat_s_h
 #define __msa_sat_s_w __builtin_msa_sat_s_w
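msa.h keeps its one-macro-per-builtin convention: the new __msa_ldr_*/__msa_str_* aliases simply expose the ldr/str builtins added in LLVM 11 alongside the long-standing ld/st ones. For context, a sketch of that existing pattern, assuming a MIPS target compiled with -mmsa (v4i32 is one of the 128-bit vector typedefs msa.h provides):

    #include <msa.h>

    void add4(int *dst, const int *a, const int *b) {
        v4i32 va = __msa_ld_w((void *)a, 0);         /* 128-bit load, imm offset 0 */
        v4i32 vb = __msa_ld_w((void *)b, 0);
        __msa_st_w(__msa_addv_w(va, vb), dst, 0);    /* addv.w, then store */
    }
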
diff --git a/linux-x86/lib64/clang/11.0.1/include/mwaitxintrin.h b/linux-x86/lib64/clang/11.0.5/include/mwaitxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/mwaitxintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/mwaitxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/nmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/nmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/nmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/nmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/omp-tools.h b/linux-x86/lib64/clang/11.0.5/include/omp-tools.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/omp-tools.h
rename to linux-x86/lib64/clang/11.0.5/include/omp-tools.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/omp.h b/linux-x86/lib64/clang/11.0.5/include/omp.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.1/include/omp.h
copy to linux-x86/lib64/clang/11.0.5/include/omp.h
index 0581ab4..e2a62ab 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/omp.h
+++ b/linux-x86/lib64/clang/11.0.5/include/omp.h
@@ -355,6 +355,9 @@
 
     extern int __KAI_KMPC_CONVENTION omp_get_supported_active_levels(void);
 
+    /* OpenMP 5.1 Display Environment */
+    extern void omp_display_env(int verbose);
+
 #   undef __KAI_KMPC_CONVENTION
 #   undef __KMP_IMP
 
diff --git a/linux-x86/lib64/clang/11.0.1/include/ompt.h b/linux-x86/lib64/clang/11.0.5/include/ompt.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ompt.h
rename to linux-x86/lib64/clang/11.0.5/include/ompt.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/opencl-c-base.h b/linux-x86/lib64/clang/11.0.5/include/opencl-c-base.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/opencl-c-base.h
rename to linux-x86/lib64/clang/11.0.5/include/opencl-c-base.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/opencl-c.h b/linux-x86/lib64/clang/11.0.5/include/opencl-c.h
similarity index 95%
copy from darwin-x86/lib64/clang/11.0.1/include/opencl-c.h
copy to linux-x86/lib64/clang/11.0.5/include/opencl-c.h
index 3210f93..66e18bd 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/opencl-c.h
+++ b/linux-x86/lib64/clang/11.0.5/include/opencl-c.h
@@ -13432,18 +13432,12 @@
 uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
 uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
 uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
 int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
 int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
 int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
 uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
 uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
 uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope);
 
 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
 long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
@@ -13482,18 +13476,12 @@
 ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
 ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
 ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
 long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
 long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
 long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
 ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
 ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
 ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope);
 #endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
 
 // OpenCL v2.0 s6.13.11.7.5:
@@ -15472,6 +15460,674 @@
 
 #endif //cl_khr_subgroups cl_intel_subgroups
 
+#if defined(cl_khr_subgroup_extended_types)
+char __ovld __conv sub_group_broadcast( char value, uint index );
+char2 __ovld __conv sub_group_broadcast( char2 value, uint index );
+char3 __ovld __conv sub_group_broadcast( char3 value, uint index );
+char4 __ovld __conv sub_group_broadcast( char4 value, uint index );
+char8 __ovld __conv sub_group_broadcast( char8 value, uint index );
+char16 __ovld __conv sub_group_broadcast( char16 value, uint index );
+
+uchar __ovld __conv sub_group_broadcast( uchar value, uint index );
+uchar2 __ovld __conv sub_group_broadcast( uchar2 value, uint index );
+uchar3 __ovld __conv sub_group_broadcast( uchar3 value, uint index );
+uchar4 __ovld __conv sub_group_broadcast( uchar4 value, uint index );
+uchar8 __ovld __conv sub_group_broadcast( uchar8 value, uint index );
+uchar16 __ovld __conv sub_group_broadcast( uchar16 value, uint index );
+
+short __ovld __conv sub_group_broadcast( short value, uint index );
+short2 __ovld __conv sub_group_broadcast( short2 value, uint index );
+short3 __ovld __conv sub_group_broadcast( short3 value, uint index );
+short4 __ovld __conv sub_group_broadcast( short4 value, uint index );
+short8 __ovld __conv sub_group_broadcast( short8 value, uint index );
+short16 __ovld __conv sub_group_broadcast( short16 value, uint index );
+
+ushort __ovld __conv sub_group_broadcast( ushort value, uint index );
+ushort2 __ovld __conv sub_group_broadcast( ushort2 value, uint index );
+ushort3 __ovld __conv sub_group_broadcast( ushort3 value, uint index );
+ushort4 __ovld __conv sub_group_broadcast( ushort4 value, uint index );
+ushort8 __ovld __conv sub_group_broadcast( ushort8 value, uint index );
+ushort16 __ovld __conv sub_group_broadcast( ushort16 value, uint index );
+
+// scalar int broadcast is part of cl_khr_subgroups
+int2 __ovld __conv sub_group_broadcast( int2 value, uint index );
+int3 __ovld __conv sub_group_broadcast( int3 value, uint index );
+int4 __ovld __conv sub_group_broadcast( int4 value, uint index );
+int8 __ovld __conv sub_group_broadcast( int8 value, uint index );
+int16 __ovld __conv sub_group_broadcast( int16 value, uint index );
+
+// scalar uint broadcast is part of cl_khr_subgroups
+uint2 __ovld __conv sub_group_broadcast( uint2 value, uint index );
+uint3 __ovld __conv sub_group_broadcast( uint3 value, uint index );
+uint4 __ovld __conv sub_group_broadcast( uint4 value, uint index );
+uint8 __ovld __conv sub_group_broadcast( uint8 value, uint index );
+uint16 __ovld __conv sub_group_broadcast( uint16 value, uint index );
+
+// scalar long broadcast is part of cl_khr_subgroups
+long2 __ovld __conv sub_group_broadcast( long2 value, uint index );
+long3 __ovld __conv sub_group_broadcast( long3 value, uint index );
+long4 __ovld __conv sub_group_broadcast( long4 value, uint index );
+long8 __ovld __conv sub_group_broadcast( long8 value, uint index );
+long16 __ovld __conv sub_group_broadcast( long16 value, uint index );
+
+// scalar ulong broadcast is part of cl_khr_subgroups
+ulong2 __ovld __conv sub_group_broadcast( ulong2 value, uint index );
+ulong3 __ovld __conv sub_group_broadcast( ulong3 value, uint index );
+ulong4 __ovld __conv sub_group_broadcast( ulong4 value, uint index );
+ulong8 __ovld __conv sub_group_broadcast( ulong8 value, uint index );
+ulong16 __ovld __conv sub_group_broadcast( ulong16 value, uint index );
+
+// scalar float broadcast is part of cl_khr_subgroups
+float2 __ovld __conv sub_group_broadcast( float2 value, uint index );
+float3 __ovld __conv sub_group_broadcast( float3 value, uint index );
+float4 __ovld __conv sub_group_broadcast( float4 value, uint index );
+float8 __ovld __conv sub_group_broadcast( float8 value, uint index );
+float16 __ovld __conv sub_group_broadcast( float16 value, uint index );
+
+char __ovld __conv sub_group_reduce_add( char value );
+uchar __ovld __conv sub_group_reduce_add( uchar value );
+short __ovld __conv sub_group_reduce_add( short value );
+ushort __ovld __conv sub_group_reduce_add( ushort value );
+
+char __ovld __conv sub_group_reduce_min( char value );
+uchar __ovld __conv sub_group_reduce_min( uchar value );
+short __ovld __conv sub_group_reduce_min( short value );
+ushort __ovld __conv sub_group_reduce_min( ushort value );
+
+char __ovld __conv sub_group_reduce_max( char value );
+uchar __ovld __conv sub_group_reduce_max( uchar value );
+short __ovld __conv sub_group_reduce_max( short value );
+ushort __ovld __conv sub_group_reduce_max( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_add( char value );
+uchar __ovld __conv sub_group_scan_inclusive_add( uchar value );
+short __ovld __conv sub_group_scan_inclusive_add( short value );
+ushort __ovld __conv sub_group_scan_inclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_min( char value );
+uchar __ovld __conv sub_group_scan_inclusive_min( uchar value );
+short __ovld __conv sub_group_scan_inclusive_min( short value );
+ushort __ovld __conv sub_group_scan_inclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_inclusive_max( char value );
+uchar __ovld __conv sub_group_scan_inclusive_max( uchar value );
+short __ovld __conv sub_group_scan_inclusive_max( short value );
+ushort __ovld __conv sub_group_scan_inclusive_max( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_add( char value );
+uchar __ovld __conv sub_group_scan_exclusive_add( uchar value );
+short __ovld __conv sub_group_scan_exclusive_add( short value );
+ushort __ovld __conv sub_group_scan_exclusive_add( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_min( char value );
+uchar __ovld __conv sub_group_scan_exclusive_min( uchar value );
+short __ovld __conv sub_group_scan_exclusive_min( short value );
+ushort __ovld __conv sub_group_scan_exclusive_min( ushort value );
+
+char __ovld __conv sub_group_scan_exclusive_max( char value );
+uchar __ovld __conv sub_group_scan_exclusive_max( uchar value );
+short __ovld __conv sub_group_scan_exclusive_max( short value );
+ushort __ovld __conv sub_group_scan_exclusive_max( ushort value );
+
+#if defined(cl_khr_fp16)
+// scalar half broadcast is part of cl_khr_subgroups
+half2 __ovld __conv sub_group_broadcast( half2 value, uint index );
+half3 __ovld __conv sub_group_broadcast( half3 value, uint index );
+half4 __ovld __conv sub_group_broadcast( half4 value, uint index );
+half8 __ovld __conv sub_group_broadcast( half8 value, uint index );
+half16 __ovld __conv sub_group_broadcast( half16 value, uint index );
+#endif  // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+// scalar double broadcast is part of cl_khr_subgroups
+double2 __ovld __conv sub_group_broadcast( double2 value, uint index );
+double3 __ovld __conv sub_group_broadcast( double3 value, uint index );
+double4 __ovld __conv sub_group_broadcast( double4 value, uint index );
+double8 __ovld __conv sub_group_broadcast( double8 value, uint index );
+double16 __ovld __conv sub_group_broadcast( double16 value, uint index );
+#endif  // cl_khr_fp64
+
+#endif  // cl_khr_subgroup_extended_types
+
+#if defined(cl_khr_subgroup_non_uniform_vote)
+int     __ovld sub_group_elect(void);
+int     __ovld sub_group_non_uniform_all( int predicate );
+int     __ovld sub_group_non_uniform_any( int predicate );
+
+int     __ovld sub_group_non_uniform_all_equal( char value );
+int     __ovld sub_group_non_uniform_all_equal( uchar value );
+int     __ovld sub_group_non_uniform_all_equal( short value );
+int     __ovld sub_group_non_uniform_all_equal( ushort value );
+int     __ovld sub_group_non_uniform_all_equal( int value );
+int     __ovld sub_group_non_uniform_all_equal( uint value );
+int     __ovld sub_group_non_uniform_all_equal( long value );
+int     __ovld sub_group_non_uniform_all_equal( ulong value );
+int     __ovld sub_group_non_uniform_all_equal( float value );
+
+#if defined(cl_khr_fp16)
+int     __ovld sub_group_non_uniform_all_equal( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+int     __ovld sub_group_non_uniform_all_equal( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_vote
+
+#if defined(cl_khr_subgroup_ballot)
+char    __ovld sub_group_non_uniform_broadcast( char value, uint index );
+char2   __ovld sub_group_non_uniform_broadcast( char2 value, uint index );
+char3   __ovld sub_group_non_uniform_broadcast( char3 value, uint index );
+char4   __ovld sub_group_non_uniform_broadcast( char4 value, uint index );
+char8   __ovld sub_group_non_uniform_broadcast( char8 value, uint index );
+char16  __ovld sub_group_non_uniform_broadcast( char16 value, uint index );
+
+uchar   __ovld sub_group_non_uniform_broadcast( uchar value, uint index );
+uchar2  __ovld sub_group_non_uniform_broadcast( uchar2 value, uint index );
+uchar3  __ovld sub_group_non_uniform_broadcast( uchar3 value, uint index );
+uchar4  __ovld sub_group_non_uniform_broadcast( uchar4 value, uint index );
+uchar8  __ovld sub_group_non_uniform_broadcast( uchar8 value, uint index );
+uchar16 __ovld sub_group_non_uniform_broadcast( uchar16 value, uint index );
+
+short   __ovld sub_group_non_uniform_broadcast( short value, uint index );
+short2  __ovld sub_group_non_uniform_broadcast( short2 value, uint index );
+short3  __ovld sub_group_non_uniform_broadcast( short3 value, uint index );
+short4  __ovld sub_group_non_uniform_broadcast( short4 value, uint index );
+short8  __ovld sub_group_non_uniform_broadcast( short8 value, uint index );
+short16 __ovld sub_group_non_uniform_broadcast( short16 value, uint index );
+
+ushort  __ovld sub_group_non_uniform_broadcast( ushort value, uint index );
+ushort2 __ovld sub_group_non_uniform_broadcast( ushort2 value, uint index );
+ushort3 __ovld sub_group_non_uniform_broadcast( ushort3 value, uint index );
+ushort4 __ovld sub_group_non_uniform_broadcast( ushort4 value, uint index );
+ushort8 __ovld sub_group_non_uniform_broadcast( ushort8 value, uint index );
+ushort16 __ovld sub_group_non_uniform_broadcast( ushort16 value, uint index );
+
+int     __ovld sub_group_non_uniform_broadcast( int value, uint index );
+int2    __ovld sub_group_non_uniform_broadcast( int2 value, uint index );
+int3    __ovld sub_group_non_uniform_broadcast( int3 value, uint index );
+int4    __ovld sub_group_non_uniform_broadcast( int4 value, uint index );
+int8    __ovld sub_group_non_uniform_broadcast( int8 value, uint index );
+int16   __ovld sub_group_non_uniform_broadcast( int16 value, uint index );
+
+uint    __ovld sub_group_non_uniform_broadcast( uint value, uint index );
+uint2   __ovld sub_group_non_uniform_broadcast( uint2 value, uint index );
+uint3   __ovld sub_group_non_uniform_broadcast( uint3 value, uint index );
+uint4   __ovld sub_group_non_uniform_broadcast( uint4 value, uint index );
+uint8   __ovld sub_group_non_uniform_broadcast( uint8 value, uint index );
+uint16  __ovld sub_group_non_uniform_broadcast( uint16 value, uint index );
+
+long    __ovld sub_group_non_uniform_broadcast( long value, uint index );
+long2   __ovld sub_group_non_uniform_broadcast( long2 value, uint index );
+long3   __ovld sub_group_non_uniform_broadcast( long3 value, uint index );
+long4   __ovld sub_group_non_uniform_broadcast( long4 value, uint index );
+long8   __ovld sub_group_non_uniform_broadcast( long8 value, uint index );
+long16  __ovld sub_group_non_uniform_broadcast( long16 value, uint index );
+
+ulong   __ovld sub_group_non_uniform_broadcast( ulong value, uint index );
+ulong2  __ovld sub_group_non_uniform_broadcast( ulong2 value, uint index );
+ulong3  __ovld sub_group_non_uniform_broadcast( ulong3 value, uint index );
+ulong4  __ovld sub_group_non_uniform_broadcast( ulong4 value, uint index );
+ulong8  __ovld sub_group_non_uniform_broadcast( ulong8 value, uint index );
+ulong16 __ovld sub_group_non_uniform_broadcast( ulong16 value, uint index );
+
+float   __ovld sub_group_non_uniform_broadcast( float value, uint index );
+float2  __ovld sub_group_non_uniform_broadcast( float2 value, uint index );
+float3  __ovld sub_group_non_uniform_broadcast( float3 value, uint index );
+float4  __ovld sub_group_non_uniform_broadcast( float4 value, uint index );
+float8  __ovld sub_group_non_uniform_broadcast( float8 value, uint index );
+float16 __ovld sub_group_non_uniform_broadcast( float16 value, uint index );
+
+char    __ovld sub_group_broadcast_first( char value );
+uchar   __ovld sub_group_broadcast_first( uchar value );
+short   __ovld sub_group_broadcast_first( short value );
+ushort  __ovld sub_group_broadcast_first( ushort value );
+int     __ovld sub_group_broadcast_first( int value );
+uint    __ovld sub_group_broadcast_first( uint value );
+long    __ovld sub_group_broadcast_first( long value );
+ulong   __ovld sub_group_broadcast_first( ulong value );
+float   __ovld sub_group_broadcast_first( float value );
+
+uint4   __ovld sub_group_ballot( int predicate );
+int     __ovld __cnfn sub_group_inverse_ballot( uint4 value );
+int     __ovld __cnfn sub_group_ballot_bit_extract( uint4 value, uint index );
+uint    __ovld __cnfn sub_group_ballot_bit_count( uint4 value );
+
+uint    __ovld sub_group_ballot_inclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_exclusive_scan( uint4 value );
+uint    __ovld sub_group_ballot_find_lsb( uint4 value );
+uint    __ovld sub_group_ballot_find_msb( uint4 value );
+
+uint4   __ovld __cnfn get_sub_group_eq_mask(void);
+uint4   __ovld __cnfn get_sub_group_ge_mask(void);
+uint4   __ovld __cnfn get_sub_group_gt_mask(void);
+uint4   __ovld __cnfn get_sub_group_le_mask(void);
+uint4   __ovld __cnfn get_sub_group_lt_mask(void);
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_broadcast( half value, uint index );
+half2   __ovld sub_group_non_uniform_broadcast( half2 value, uint index );
+half3   __ovld sub_group_non_uniform_broadcast( half3 value, uint index );
+half4   __ovld sub_group_non_uniform_broadcast( half4 value, uint index );
+half8   __ovld sub_group_non_uniform_broadcast( half8 value, uint index );
+half16  __ovld sub_group_non_uniform_broadcast( half16 value, uint index );
+
+half    __ovld sub_group_broadcast_first( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double   __ovld sub_group_non_uniform_broadcast( double value, uint index );
+double2  __ovld sub_group_non_uniform_broadcast( double2 value, uint index );
+double3  __ovld sub_group_non_uniform_broadcast( double3 value, uint index );
+double4  __ovld sub_group_non_uniform_broadcast( double4 value, uint index );
+double8  __ovld sub_group_non_uniform_broadcast( double8 value, uint index );
+double16 __ovld sub_group_non_uniform_broadcast( double16 value, uint index );
+
+double   __ovld sub_group_broadcast_first( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_ballot
+
+#if defined(cl_khr_subgroup_non_uniform_arithmetic)
+char    __ovld sub_group_non_uniform_reduce_add( char value );
+uchar   __ovld sub_group_non_uniform_reduce_add( uchar value );
+short   __ovld sub_group_non_uniform_reduce_add( short value );
+ushort  __ovld sub_group_non_uniform_reduce_add( ushort value );
+int     __ovld sub_group_non_uniform_reduce_add( int value );
+uint    __ovld sub_group_non_uniform_reduce_add( uint value );
+long    __ovld sub_group_non_uniform_reduce_add( long value );
+ulong   __ovld sub_group_non_uniform_reduce_add( ulong value );
+float   __ovld sub_group_non_uniform_reduce_add( float value );
+
+char    __ovld sub_group_non_uniform_reduce_mul( char value );
+uchar   __ovld sub_group_non_uniform_reduce_mul( uchar value );
+short   __ovld sub_group_non_uniform_reduce_mul( short value );
+ushort  __ovld sub_group_non_uniform_reduce_mul( ushort value );
+int     __ovld sub_group_non_uniform_reduce_mul( int value );
+uint    __ovld sub_group_non_uniform_reduce_mul( uint value );
+long    __ovld sub_group_non_uniform_reduce_mul( long value );
+ulong   __ovld sub_group_non_uniform_reduce_mul( ulong value );
+float   __ovld sub_group_non_uniform_reduce_mul( float value );
+
+char    __ovld sub_group_non_uniform_reduce_min( char value );
+uchar   __ovld sub_group_non_uniform_reduce_min( uchar value );
+short   __ovld sub_group_non_uniform_reduce_min( short value );
+ushort  __ovld sub_group_non_uniform_reduce_min( ushort value );
+int     __ovld sub_group_non_uniform_reduce_min( int value );
+uint    __ovld sub_group_non_uniform_reduce_min( uint value );
+long    __ovld sub_group_non_uniform_reduce_min( long value );
+ulong   __ovld sub_group_non_uniform_reduce_min( ulong value );
+float   __ovld sub_group_non_uniform_reduce_min( float value );
+
+char    __ovld sub_group_non_uniform_reduce_max( char value );
+uchar   __ovld sub_group_non_uniform_reduce_max( uchar value );
+short   __ovld sub_group_non_uniform_reduce_max( short value );
+ushort  __ovld sub_group_non_uniform_reduce_max( ushort value );
+int     __ovld sub_group_non_uniform_reduce_max( int value );
+uint    __ovld sub_group_non_uniform_reduce_max( uint value );
+long    __ovld sub_group_non_uniform_reduce_max( long value );
+ulong   __ovld sub_group_non_uniform_reduce_max( ulong value );
+float   __ovld sub_group_non_uniform_reduce_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_inclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_add( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_add( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_add( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_add( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_add( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_add( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_add( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_add( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_add( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_mul( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_mul( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_mul( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_mul( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_mul( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_mul( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_mul( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_mul( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_mul( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_min( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_min( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_min( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_min( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_min( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_min( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_min( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_min( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_min( float value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_max( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_max( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_max( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_max( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_max( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_max( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_max( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_max( ulong value );
+float   __ovld sub_group_non_uniform_scan_exclusive_max( float value );
+
+char    __ovld sub_group_non_uniform_reduce_and( char value );
+uchar   __ovld sub_group_non_uniform_reduce_and( uchar value );
+short   __ovld sub_group_non_uniform_reduce_and( short value );
+ushort  __ovld sub_group_non_uniform_reduce_and( ushort value );
+int     __ovld sub_group_non_uniform_reduce_and( int value );
+uint    __ovld sub_group_non_uniform_reduce_and( uint value );
+long    __ovld sub_group_non_uniform_reduce_and( long value );
+ulong   __ovld sub_group_non_uniform_reduce_and( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_or( char value );
+uchar   __ovld sub_group_non_uniform_reduce_or( uchar value );
+short   __ovld sub_group_non_uniform_reduce_or( short value );
+ushort  __ovld sub_group_non_uniform_reduce_or( ushort value );
+int     __ovld sub_group_non_uniform_reduce_or( int value );
+uint    __ovld sub_group_non_uniform_reduce_or( uint value );
+long    __ovld sub_group_non_uniform_reduce_or( long value );
+ulong   __ovld sub_group_non_uniform_reduce_or( ulong value );
+
+char    __ovld sub_group_non_uniform_reduce_xor( char value );
+uchar   __ovld sub_group_non_uniform_reduce_xor( uchar value );
+short   __ovld sub_group_non_uniform_reduce_xor( short value );
+ushort  __ovld sub_group_non_uniform_reduce_xor( ushort value );
+int     __ovld sub_group_non_uniform_reduce_xor( int value );
+uint    __ovld sub_group_non_uniform_reduce_xor( uint value );
+long    __ovld sub_group_non_uniform_reduce_xor( long value );
+ulong   __ovld sub_group_non_uniform_reduce_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_inclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_inclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_inclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_inclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_inclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_inclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_inclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_inclusive_xor( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_and( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_and( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_and( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_and( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_and( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_and( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_and( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_and( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_or( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_or( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_or( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_or( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_or( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_or( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_or( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_or( ulong value );
+
+char    __ovld sub_group_non_uniform_scan_exclusive_xor( char value );
+uchar   __ovld sub_group_non_uniform_scan_exclusive_xor( uchar value );
+short   __ovld sub_group_non_uniform_scan_exclusive_xor( short value );
+ushort  __ovld sub_group_non_uniform_scan_exclusive_xor( ushort value );
+int     __ovld sub_group_non_uniform_scan_exclusive_xor( int value );
+uint    __ovld sub_group_non_uniform_scan_exclusive_xor( uint value );
+long    __ovld sub_group_non_uniform_scan_exclusive_xor( long value );
+ulong   __ovld sub_group_non_uniform_scan_exclusive_xor( ulong value );
+
+int     __ovld sub_group_non_uniform_reduce_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_reduce_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_inclusive_logical_xor( int predicate );
+
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_and( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_or( int predicate );
+int     __ovld sub_group_non_uniform_scan_exclusive_logical_xor( int predicate );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_non_uniform_reduce_add( half value );
+half    __ovld sub_group_non_uniform_reduce_mul( half value );
+half    __ovld sub_group_non_uniform_reduce_min( half value );
+half    __ovld sub_group_non_uniform_reduce_max( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_inclusive_max( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_add( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_mul( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_min( half value );
+half    __ovld sub_group_non_uniform_scan_exclusive_max( half value );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_non_uniform_reduce_add( double value );
+double  __ovld sub_group_non_uniform_reduce_mul( double value );
+double  __ovld sub_group_non_uniform_reduce_min( double value );
+double  __ovld sub_group_non_uniform_reduce_max( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_inclusive_max( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_add( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_mul( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_min( double value );
+double  __ovld sub_group_non_uniform_scan_exclusive_max( double value );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_non_uniform_arithmetic
+
+#if defined(cl_khr_subgroup_shuffle)
+char    __ovld sub_group_shuffle( char value, uint index );
+uchar   __ovld sub_group_shuffle( uchar value, uint index );
+short   __ovld sub_group_shuffle( short value, uint index );
+ushort  __ovld sub_group_shuffle( ushort value, uint index );
+int     __ovld sub_group_shuffle( int value, uint index );
+uint    __ovld sub_group_shuffle( uint value, uint index );
+long    __ovld sub_group_shuffle( long value, uint index );
+ulong   __ovld sub_group_shuffle( ulong value, uint index );
+float   __ovld sub_group_shuffle( float value, uint index );
+
+char    __ovld sub_group_shuffle_xor( char value, uint mask );
+uchar   __ovld sub_group_shuffle_xor( uchar value, uint mask );
+short   __ovld sub_group_shuffle_xor( short value, uint mask );
+ushort  __ovld sub_group_shuffle_xor( ushort value, uint mask );
+int     __ovld sub_group_shuffle_xor( int value, uint mask );
+uint    __ovld sub_group_shuffle_xor( uint value, uint mask );
+long    __ovld sub_group_shuffle_xor( long value, uint mask );
+ulong   __ovld sub_group_shuffle_xor( ulong value, uint mask );
+float   __ovld sub_group_shuffle_xor( float value, uint mask );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle( half value, uint index );
+half    __ovld sub_group_shuffle_xor( half value, uint mask );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle( double value, uint index );
+double  __ovld sub_group_shuffle_xor( double value, uint mask );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle
+
+#if defined(cl_khr_subgroup_shuffle_relative)
+char    __ovld sub_group_shuffle_up( char value, uint delta );
+uchar   __ovld sub_group_shuffle_up( uchar value, uint delta );
+short   __ovld sub_group_shuffle_up( short value, uint delta );
+ushort  __ovld sub_group_shuffle_up( ushort value, uint delta );
+int     __ovld sub_group_shuffle_up( int value, uint delta );
+uint    __ovld sub_group_shuffle_up( uint value, uint delta );
+long    __ovld sub_group_shuffle_up( long value, uint delta );
+ulong   __ovld sub_group_shuffle_up( ulong value, uint delta );
+float   __ovld sub_group_shuffle_up( float value, uint delta );
+
+char    __ovld sub_group_shuffle_down( char value, uint delta );
+uchar   __ovld sub_group_shuffle_down( uchar value, uint delta );
+short   __ovld sub_group_shuffle_down( short value, uint delta );
+ushort  __ovld sub_group_shuffle_down( ushort value, uint delta );
+int     __ovld sub_group_shuffle_down( int value, uint delta );
+uint    __ovld sub_group_shuffle_down( uint value, uint delta );
+long    __ovld sub_group_shuffle_down( long value, uint delta );
+ulong   __ovld sub_group_shuffle_down( ulong value, uint delta );
+float   __ovld sub_group_shuffle_down( float value, uint delta );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_shuffle_up( half value, uint delta );
+half    __ovld sub_group_shuffle_down( half value, uint delta );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_shuffle_up( double value, uint delta );
+double  __ovld sub_group_shuffle_down( double value, uint delta );
+#endif // cl_khr_fp64
+
+#endif // cl_khr_subgroup_shuffle_relative
+
+#if defined(cl_khr_subgroup_clustered_reduce)
+char    __ovld sub_group_clustered_reduce_add( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_add( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_add( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_add( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_add( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_add( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_add( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_add( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_add( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_mul( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_mul( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_mul( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_mul( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_mul( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_mul( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_mul( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_mul( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_mul( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_min( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_min( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_min( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_min( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_min( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_min( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_min( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_min( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_min( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_max( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_max( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_max( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_max( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_max( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_max( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_max( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_max( ulong value, uint clustersize );
+float   __ovld sub_group_clustered_reduce_max( float value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_and( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_and( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_and( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_and( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_and( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_and( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_and( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_and( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_or( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_or( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_or( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_or( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_or( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_or( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_or( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_or( ulong value, uint clustersize );
+
+char    __ovld sub_group_clustered_reduce_xor( char value, uint clustersize );
+uchar   __ovld sub_group_clustered_reduce_xor( uchar value, uint clustersize );
+short   __ovld sub_group_clustered_reduce_xor( short value, uint clustersize );
+ushort  __ovld sub_group_clustered_reduce_xor( ushort value, uint clustersize );
+int     __ovld sub_group_clustered_reduce_xor( int value, uint clustersize );
+uint    __ovld sub_group_clustered_reduce_xor( uint value, uint clustersize );
+long    __ovld sub_group_clustered_reduce_xor( long value, uint clustersize );
+ulong   __ovld sub_group_clustered_reduce_xor( ulong value, uint clustersize );
+
+int     __ovld sub_group_clustered_reduce_logical_and( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_or( int predicate, uint clustersize );
+int     __ovld sub_group_clustered_reduce_logical_xor( int predicate, uint clustersize );
+
+#if defined(cl_khr_fp16)
+half    __ovld sub_group_clustered_reduce_add( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_mul( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_min( half value, uint clustersize );
+half    __ovld sub_group_clustered_reduce_max( half value, uint clustersize );
+#endif // cl_khr_fp16
+
+#if defined(cl_khr_fp64)
+double  __ovld sub_group_clustered_reduce_add( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_mul( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_min( double value, uint clustersize );
+double  __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
+#endif // cl_khr_fp64
+
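+// Illustrative only: with clustersize == 4, each run of four consecutive
+// sub-group lanes receives the reduction over just that cluster, e.g.
+//
+//   float quad_sum = sub_group_clustered_reduce_add(value, 4u);
+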
+#endif // cl_khr_subgroup_clustered_reduce
+
 #if defined(cl_intel_subgroups)
 // Intel-Specific Sub Group Functions
 float   __ovld __conv intel_sub_group_shuffle( float  x, uint c );
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
new file mode 100644
index 0000000..406c974
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
@@ -0,0 +1,42 @@
+/*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+
+/// Include declarations for libdevice functions.
+#include <__clang_cuda_libdevice_declares.h>
+
+/// Provide definitions for these functions.
+#include <__clang_cuda_device_functions.h>
+
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#pragma omp end declare variant
+
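+// Illustrative only: once this header is in effect for OpenMP device
+// compilation, plain libm calls inside a target region resolve to the CUDA
+// device implementations declared above, e.g.
+//
+//   #pragma omp target map(tofrom : x)
+//   { x = sin(x); }   // lowered to the NVPTX device sin
+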
+#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
new file mode 100644
index 0000000..bd6011e
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
@@ -0,0 +1,75 @@
+/*===------------------ cmath - OpenMP overlay for <cmath> ------------ c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_CMATH_H__
+#define __CLANG_OPENMP_CMATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <cmath>
+
+// Make sure we include our math.h overlay; it probably happened already, but
+// we need to be sure.
+#include <math.h>
+
+// We (might) need cstdlib because __clang_cuda_cmath.h below declares `abs`,
+// which might live in cstdlib.
+#include <cstdlib>
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_cmath.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+// Overloads not provided by the CUDA wrappers but by the CUDA system headers.
+// Since we do not include the latter, we define them ourselves.
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
+__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float log2(float __x) { return ::log2f(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
+__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+  return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+  return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbln(float __x, long int __y) {
+  return ::scalblnf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
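+// Illustrative only: these float overloads keep single-precision math on the
+// device; e.g. hypot(3.0f, 4.0f) in a target region resolves to ::hypotf
+// instead of being promoted to the double overload.
+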
+#undef __DEVICE__
+
+#pragma omp end declare variant
+
+#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
new file mode 100644
index 0000000..1ed0b14
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
@@ -0,0 +1,25 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require std::math functions in the complex builtins below.
+#include <cmath>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
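+
+// Illustrative only: with __muldc3 and friends available on the device,
+// complex arithmetic such as
+//   std::complex<double> z = a * b;   // may lower to __muldc3
+// can run inside OpenMP target regions.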
+#endif
+
+// Grab the host header too.
+#include_next <complex>
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h
new file mode 100644
index 0000000..829c7a7
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h
@@ -0,0 +1,25 @@
+/*===-- complex.h --- OpenMP complex wrapper for target regions -------- c -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX_H__
+#define __CLANG_OPENMP_COMPLEX_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require math functions in the complex builtins below.
+#include <math.h>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex.h>
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h
new file mode 100644
index 0000000..c64af8b
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h
@@ -0,0 +1,51 @@
+/*===---- openmp_wrapper/math.h -------- OpenMP math.h intercept ------ c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+// If we are in C++ mode and include <math.h> (not <cmath>) first, we still need
+// to make sure <cmath> is read first. The problem otherwise is that we have not
+// yet seen the declarations of the math.h functions when the system math.h
+// includes our cmath overlay. However, our cmath overlay, or rather the
+// underlying overlay, e.g. CUDA's, uses the math.h functions; since they have
+// not been declared yet, we get errors. CUDA avoids this by eagerly declaring
+// all math functions (in the __device__ space), but we cannot do that. Instead,
+// we break the dependence by forcing cmath to go first. While our cmath will in
+// turn include this file, the cmath include guards prevent recursion.
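+//
+// Illustrative only: the net effect is that in C++ mode, `#include <math.h>`
+// behaves as if <cmath> had been included first, which breaks the cycle.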
+#ifdef __cplusplus
+#include <cmath>
+#endif
+
+#ifndef __CLANG_OPENMP_MATH_H__
+#define __CLANG_OPENMP_MATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <math.h>
+
+// We need limits.h for __clang_cuda_math.h below; since including it eagerly
+// should not hurt, we do so here.
+#include <limits.h>
+
+// We need stdlib.h because (for now) __clang_cuda_math.h below declares `abs`,
+// which should live in stdlib.h.
+#include <stdlib.h>
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_math.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+#pragma omp end declare variant
+
+#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/new b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/openmp_wrappers/new
rename to linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/new
diff --git a/linux-x86/lib64/clang/11.0.1/include/pconfigintrin.h b/linux-x86/lib64/clang/11.0.5/include/pconfigintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/pconfigintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/pconfigintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/pkuintrin.h b/linux-x86/lib64/clang/11.0.5/include/pkuintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/pkuintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/pkuintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/pmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/pmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/pmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/popcntintrin.h b/linux-x86/lib64/clang/11.0.5/include/popcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/popcntintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/popcntintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/emmintrin.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/emmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/mm_malloc.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/mm_malloc.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/mmintrin.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/mmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/pmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/pmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/smmintrin.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/smmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/tmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/tmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/xmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ppc_wrappers/xmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/prfchwintrin.h b/linux-x86/lib64/clang/11.0.5/include/prfchwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/prfchwintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
similarity index 94%
copy from darwin-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc
copy to linux-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
index 99f41d8..a691352 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/profile/InstrProfData.inc
+++ b/linux-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
@@ -198,6 +198,14 @@
 #undef VALUE_PROF_KIND
 /* VALUE_PROF_KIND end */
 
+#undef COVMAP_V2_OR_V3
+#ifdef COVMAP_V2
+#define COVMAP_V2_OR_V3
+#endif
+#ifdef COVMAP_V3
+#define COVMAP_V2_OR_V3
+#endif
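+/* For example, a client that defines COVMAP_V3 gets both the shared
+ * COVMAP_V2_OR_V3 function-record fields and the V3-only
+ * FilenamesRef/CoverageMapping fields further below. */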
+
 /* COVMAP_FUNC_RECORD start */
 /* Definition of member fields of the function record structure in coverage
  * map.
@@ -214,16 +222,30 @@
 COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
                    llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
                    NameValue.size()))
-#else
+#endif
+#ifdef COVMAP_V2_OR_V3
 COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                   llvm::IndexedInstrProf::ComputeHash(NameValue)))
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt64Ty(Ctx), NameHash))
 #endif
 COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
-                   CoverageMapping.size()))
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt32Ty(Ctx), CoverageMapping.size()))
 COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                   llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt64Ty(Ctx), FuncHash))
+#ifdef COVMAP_V3
+COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \
+                   llvm::ConstantInt::get( \
+                     llvm::Type::getInt64Ty(Ctx), FilenamesRef))
+COVMAP_FUNC_RECORD(const char, \
+                   llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \
+                                        CoverageMapping.size()), \
+                   CoverageMapping, \
+                   llvm::ConstantDataArray::getRaw( \
+                     CoverageMapping, CoverageMapping.size(), \
+                     llvm::Type::getInt8Ty(Ctx)))
+#endif
 #undef COVMAP_FUNC_RECORD
 /* COVMAP_FUNC_RECORD end.  */
 
@@ -236,7 +258,7 @@
 #define INSTR_PROF_DATA_DEFINED
 #endif
 COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
-              llvm::ConstantInt::get(Int32Ty,  FunctionRecords.size()))
+              llvm::ConstantInt::get(Int32Ty, NRecords))
 COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
               llvm::ConstantInt::get(Int32Ty, FilenamesSize))
 COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
@@ -267,6 +289,9 @@
 INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
                       INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
                       INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
+INSTR_PROF_SECT_ENTRY(IPSK_covfun, \
+                      INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \
+                      INSTR_PROF_COVFUN_COFF, "__LLVM_COV,")
 INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
                       INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
                       INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
@@ -632,9 +657,9 @@
 /* Raw profile format version (start from 1). */
 #define INSTR_PROF_RAW_VERSION 5
 /* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 5
-/* Coverage mapping format vresion (start from 0). */
-#define INSTR_PROF_COVMAP_VERSION 2
+#define INSTR_PROF_INDEX_VERSION 6
+/* Coverage mapping format version (start from 0). */
+#define INSTR_PROF_COVMAP_VERSION 3
 
 /* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
  * version for other variants of profile. We set the lowest bit of the upper 8
@@ -661,6 +686,7 @@
 #define INSTR_PROF_VALS_COMMON __llvm_prf_vals
 #define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
 #define INSTR_PROF_COVMAP_COMMON __llvm_covmap
+#define INSTR_PROF_COVFUN_COMMON __llvm_covfun
 #define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
 /* Windows section names. Because these section names contain dollar characters,
  * they must be quoted.
@@ -671,6 +697,7 @@
 #define INSTR_PROF_VALS_COFF ".lprfv$M"
 #define INSTR_PROF_VNODES_COFF ".lprfnd$M"
 #define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
+#define INSTR_PROF_COVFUN_COFF ".lcovfun$M"
 #define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
 
 #ifdef _WIN32
@@ -685,6 +712,7 @@
 /* Value profile nodes section. */
 #define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
 #define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
+#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF
 #define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
 #else
 /* Runtime section names and name strings.  */
@@ -698,6 +726,7 @@
 /* Value profile nodes section. */
 #define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
 #define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
+#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON)
 /* Order file instrumentation. */
 #define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
   INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
@@ -752,3 +781,5 @@
 #else
 #undef INSTR_PROF_DATA_DEFINED
 #endif
+
+#undef COVMAP_V2_OR_V3
diff --git a/linux-x86/lib64/clang/11.0.1/include/ptwriteintrin.h b/linux-x86/lib64/clang/11.0.5/include/ptwriteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/ptwriteintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/ptwriteintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/rdseedintrin.h b/linux-x86/lib64/clang/11.0.5/include/rdseedintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/rdseedintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/rdseedintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/rtmintrin.h b/linux-x86/lib64/clang/11.0.5/include/rtmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/rtmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/rtmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/s390intrin.h b/linux-x86/lib64/clang/11.0.5/include/s390intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/s390intrin.h
rename to linux-x86/lib64/clang/11.0.5/include/s390intrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/allocator_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/allocator_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/asan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/common_interface_defs.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/coverage_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/coverage_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/dfsan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/hwasan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/hwasan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/linux_syscall_hooks.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/linux_syscall_hooks.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/lsan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/lsan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/msan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/netbsd_syscall_hooks.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/scudo_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/scudo_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
similarity index 91%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
index 011b233..96b8ad5 100644
--- a/linux-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface.h
+++ b/linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
@@ -38,34 +38,34 @@
 
 // Mutex has static storage duration and no-op constructor and destructor.
 // This effectively makes tsan ignore destroy annotation.
-const unsigned __tsan_mutex_linker_init      = 1 << 0;
+static const unsigned __tsan_mutex_linker_init      = 1 << 0;
 // Mutex is write reentrant.
-const unsigned __tsan_mutex_write_reentrant  = 1 << 1;
+static const unsigned __tsan_mutex_write_reentrant  = 1 << 1;
 // Mutex is read reentrant.
-const unsigned __tsan_mutex_read_reentrant   = 1 << 2;
+static const unsigned __tsan_mutex_read_reentrant   = 1 << 2;
 // Mutex does not have static storage duration, and must not be used after
 // its destructor runs.  The opposite of __tsan_mutex_linker_init.
 // If this flag is passed to __tsan_mutex_destroy, then the destruction
 // is ignored unless this flag was previously set on the mutex.
-const unsigned __tsan_mutex_not_static       = 1 << 8;
+static const unsigned __tsan_mutex_not_static       = 1 << 8;
 
 // Mutex operation flags:
 
 // Denotes read lock operation.
-const unsigned __tsan_mutex_read_lock        = 1 << 3;
+static const unsigned __tsan_mutex_read_lock        = 1 << 3;
 // Denotes try lock operation.
-const unsigned __tsan_mutex_try_lock         = 1 << 4;
+static const unsigned __tsan_mutex_try_lock         = 1 << 4;
 // Denotes that a try lock operation has failed to acquire the mutex.
-const unsigned __tsan_mutex_try_lock_failed  = 1 << 5;
+static const unsigned __tsan_mutex_try_lock_failed  = 1 << 5;
 // Denotes that the lock operation acquires multiple recursion levels.
 // Number of levels is passed in recursion parameter.
 // This is useful for annotation of e.g. Java builtin monitors,
 // for which wait operation releases all recursive acquisitions of the mutex.
-const unsigned __tsan_mutex_recursive_lock   = 1 << 6;
+static const unsigned __tsan_mutex_recursive_lock   = 1 << 6;
 // Denotes that the unlock operation releases all recursion levels.
 // Number of released levels is returned and later must be passed to
 // the corresponding __tsan_mutex_post_lock annotation.
-const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
+static const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
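+
+// Illustrative only: a failed try-lock would be annotated roughly as
+//   __tsan_mutex_pre_lock(&m, __tsan_mutex_try_lock);
+//   __tsan_mutex_post_lock(&m, __tsan_mutex_try_lock |
+//                              __tsan_mutex_try_lock_failed, 0);
+// using the annotation functions declared later in this header.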
 
 // Annotate creation of a mutex.
 // Supported flags: mutex creation flags.
@@ -152,7 +152,7 @@
 
 // Flags for __tsan_switch_to_fiber:
 // Do not establish a happens-before relation between fibers
-const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
+static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface_atomic.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/tsan_interface_atomic.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/sanitizer/ubsan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sanitizer/ubsan_interface.h
rename to linux-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/serializeintrin.h b/linux-x86/lib64/clang/11.0.5/include/serializeintrin.h
new file mode 100644
index 0000000..b774e5a
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/serializeintrin.h
@@ -0,0 +1,30 @@
+/*===--------------- serializeintrin.h - serialize intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <serializeintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __SERIALIZEINTRIN_H
+#define __SERIALIZEINTRIN_H
+
+/// Serialize instruction fetch and execution.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> SERIALIZE </c> instruction.
+///
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("serialize")))
+_serialize (void)
+{
+  __builtin_ia32_serialize ();
+}
+
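+/* Illustrative only (patch_instructions and buf are hypothetical): a JIT
+ * that rewrites code bytes might issue
+ *
+ *   patch_instructions(buf);
+ *   _serialize();   // order instruction fetch/execute after the rewrite
+ */
+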
+#endif /* __SERIALIZEINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/sgxintrin.h b/linux-x86/lib64/clang/11.0.5/include/sgxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/sgxintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/sgxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/shaintrin.h b/linux-x86/lib64/clang/11.0.5/include/shaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/shaintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/shaintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/smmintrin.h b/linux-x86/lib64/clang/11.0.5/include/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/smmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/smmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stdalign.h b/linux-x86/lib64/clang/11.0.5/include/stdalign.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stdalign.h
rename to linux-x86/lib64/clang/11.0.5/include/stdalign.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stdarg.h b/linux-x86/lib64/clang/11.0.5/include/stdarg.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stdarg.h
rename to linux-x86/lib64/clang/11.0.5/include/stdarg.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stdatomic.h b/linux-x86/lib64/clang/11.0.5/include/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stdatomic.h
rename to linux-x86/lib64/clang/11.0.5/include/stdatomic.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stdbool.h b/linux-x86/lib64/clang/11.0.5/include/stdbool.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stdbool.h
rename to linux-x86/lib64/clang/11.0.5/include/stdbool.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stddef.h b/linux-x86/lib64/clang/11.0.5/include/stddef.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stddef.h
rename to linux-x86/lib64/clang/11.0.5/include/stddef.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stdint.h b/linux-x86/lib64/clang/11.0.5/include/stdint.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stdint.h
rename to linux-x86/lib64/clang/11.0.5/include/stdint.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/stdnoreturn.h b/linux-x86/lib64/clang/11.0.5/include/stdnoreturn.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/stdnoreturn.h
rename to linux-x86/lib64/clang/11.0.5/include/stdnoreturn.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/tbmintrin.h b/linux-x86/lib64/clang/11.0.5/include/tbmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/tbmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/tbmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/tgmath.h b/linux-x86/lib64/clang/11.0.5/include/tgmath.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/tgmath.h
rename to linux-x86/lib64/clang/11.0.5/include/tgmath.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/tmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/tmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/tmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h b/linux-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h
new file mode 100644
index 0000000..491823e
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h
@@ -0,0 +1,56 @@
+/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <tsxldtrkintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __TSXLDTRKINTRIN_H
+#define __TSXLDTRKINTRIN_H
+
+/* Define the default attributes for the functions in this file */
+#define _DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("tsxldtrk")))
+
+/// Marks the start of a TSX (RTM) suspend load address tracking region. If
+///    this intrinsic is used inside a transactional region, subsequent loads
+///    are not added to the read set of the transaction. If it's used inside a
+///    suspend load address tracking region, it causes a transaction abort.
+///    If it's used outside of a transactional region, it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XSUSLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xsusldtrk (void)
+{
+    __builtin_ia32_xsusldtrk();
+}
+
+/// Marks the end of a TSX (RTM) suspend load address tracking region. If this
+///    intrinsic is used inside a suspend load address tracking region, it ends
+///    the suspend region, and all following load addresses are added to the
+///    transaction read set. If it's used inside an active transaction but not
+///    in a suspend region, it causes a transaction abort. If it's used outside
+///    of a transactional region, it behaves like a NOP.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c XRESLDTRK instruction.
+///
+static __inline__ void _DEFAULT_FN_ATTRS
+_xresldtrk (void)
+{
+    __builtin_ia32_xresldtrk();
+}
+
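+/* Illustrative only, combining with the RTM intrinsics from <immintrin.h>
+ * (`scratch` and `shared` are hypothetical):
+ *
+ *   if (_xbegin() == _XBEGIN_STARTED) {
+ *     _xsusldtrk();          // loads below stay out of the read set
+ *     int tmp = *scratch;    // untracked load
+ *     _xresldtrk();          // resume load tracking
+ *     *shared += tmp;
+ *     _xend();
+ *   }
+ */
+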
+#undef _DEFAULT_FN_ATTRS
+
+#endif /* __TSXLDTRKINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.1/include/unwind.h b/linux-x86/lib64/clang/11.0.5/include/unwind.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/unwind.h
rename to linux-x86/lib64/clang/11.0.5/include/unwind.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/vadefs.h b/linux-x86/lib64/clang/11.0.5/include/vadefs.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/vadefs.h
rename to linux-x86/lib64/clang/11.0.5/include/vadefs.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/vaesintrin.h b/linux-x86/lib64/clang/11.0.5/include/vaesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/vaesintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/vaesintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/varargs.h b/linux-x86/lib64/clang/11.0.5/include/varargs.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/varargs.h
rename to linux-x86/lib64/clang/11.0.5/include/varargs.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/vecintrin.h b/linux-x86/lib64/clang/11.0.5/include/vecintrin.h
new file mode 100644
index 0000000..e58c976
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/vecintrin.h
@@ -0,0 +1,10998 @@
+/*===---- vecintrin.h - Vector intrinsics ----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if defined(__s390x__) && defined(__VEC__)
+
+#define __ATTRS_ai __attribute__((__always_inline__))
+#define __ATTRS_o __attribute__((__overloadable__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+#define __constant(PARM) \
+  __attribute__((__enable_if__ ((PARM) == (PARM), \
+     "argument must be a constant integer")))
+#define __constant_range(PARM, LOW, HIGH) \
+  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \
+     "argument must be a constant integer from " #LOW " to " #HIGH)))
+#define __constant_pow2_range(PARM, LOW, HIGH) \
+  __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \
+                                ((PARM) & ((PARM) - 1)) == 0, \
+     "argument must be a constant power of 2 from " #LOW " to " #HIGH)))
+
+/*-- __lcbb -----------------------------------------------------------------*/
+
+extern __ATTRS_o unsigned int
+__lcbb(const void *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \
+  __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \
+                           ((Y) == 64 ? 0 : \
+                            (Y) == 128 ? 1 : \
+                            (Y) == 256 ? 2 : \
+                            (Y) == 512 ? 3 : \
+                            (Y) == 1024 ? 4 : \
+                            (Y) == 2048 ? 5 : \
+                            (Y) == 4096 ? 6 : 0) : 0))
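+
+// Illustrative only: __lcbb returns how many bytes can be loaded starting at
+// __ptr without crossing the given block boundary (at most 16), e.g.
+//
+//   unsigned int n = __lcbb(p, 4096);  // bytes left before the 4 KiB boundary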
+
+/*-- vec_extract ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai signed char
+vec_extract(__vector signed char __vec, int __index) {
+  return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(__vector __bool char __vec, int __index) {
+  return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai unsigned char
+vec_extract(__vector unsigned char __vec, int __index) {
+  return __vec[__index & 15];
+}
+
+static inline __ATTRS_o_ai signed short
+vec_extract(__vector signed short __vec, int __index) {
+  return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(__vector __bool short __vec, int __index) {
+  return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai unsigned short
+vec_extract(__vector unsigned short __vec, int __index) {
+  return __vec[__index & 7];
+}
+
+static inline __ATTRS_o_ai signed int
+vec_extract(__vector signed int __vec, int __index) {
+  return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(__vector __bool int __vec, int __index) {
+  return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai unsigned int
+vec_extract(__vector unsigned int __vec, int __index) {
+  return __vec[__index & 3];
+}
+
+static inline __ATTRS_o_ai signed long long
+vec_extract(__vector signed long long __vec, int __index) {
+  return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(__vector __bool long long __vec, int __index) {
+  return __vec[__index & 1];
+}
+
+static inline __ATTRS_o_ai unsigned long long
+vec_extract(__vector unsigned long long __vec, int __index) {
+  return __vec[__index & 1];
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai float
+vec_extract(__vector float __vec, int __index) {
+  return __vec[__index & 3];
+}
+#endif
+
+static inline __ATTRS_o_ai double
+vec_extract(__vector double __vec, int __index) {
+  return __vec[__index & 1];
+}
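+
+// Illustrative only: the index is reduced modulo the element count, e.g.
+//
+//   __vector signed int v = {1, 2, 3, 4};
+//   int x = vec_extract(v, 5);   // 5 & 3 == 1, so x == 2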
+
+/*-- vec_insert -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_insert(signed char __scalar, __vector signed char __vec, int __index) {
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector __bool char __vec, int __index) {
+  __vector unsigned char __newvec = (__vector unsigned char)__vec;
+  __newvec[__index & 15] = (unsigned char)__scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert(unsigned char __scalar, __vector unsigned char __vec, int __index) {
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_insert(signed short __scalar, __vector signed short __vec, int __index) {
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector __bool short __vec,
+           int __index) {
+  __vector unsigned short __newvec = (__vector unsigned short)__vec;
+  __newvec[__index & 7] = (unsigned short)__scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert(unsigned short __scalar, __vector unsigned short __vec,
+           int __index) {
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_insert(signed int __scalar, __vector signed int __vec, int __index) {
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector __bool int __vec, int __index) {
+  __vector unsigned int __newvec = (__vector unsigned int)__vec;
+  __newvec[__index & 3] = __scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert(unsigned int __scalar, __vector unsigned int __vec, int __index) {
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_insert(signed long long __scalar, __vector signed long long __vec,
+           int __index) {
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector __bool long long __vec,
+           int __index) {
+  __vector unsigned long long __newvec = (__vector unsigned long long)__vec;
+  __newvec[__index & 1] = __scalar;
+  return __newvec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert(unsigned long long __scalar, __vector unsigned long long __vec,
+           int __index) {
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_insert(float __scalar, __vector float __vec, int __index) {
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_insert(double __scalar, __vector double __vec, int __index) {
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+/*-- vec_promote ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_promote(signed char __scalar, int __index) {
+  const __vector signed char __zero = (__vector signed char)0;
+  __vector signed char __vec = __builtin_shufflevector(__zero, __zero,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_promote(unsigned char __scalar, int __index) {
+  const __vector unsigned char __zero = (__vector unsigned char)0;
+  __vector unsigned char __vec = __builtin_shufflevector(__zero, __zero,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 15] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_promote(signed short __scalar, int __index) {
+  const __vector signed short __zero = (__vector signed short)0;
+  __vector signed short __vec = __builtin_shufflevector(__zero, __zero,
+                                -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_promote(unsigned short __scalar, int __index) {
+  const __vector unsigned short __zero = (__vector unsigned short)0;
+  __vector unsigned short __vec = __builtin_shufflevector(__zero, __zero,
+                                  -1, -1, -1, -1, -1, -1, -1, -1);
+  __vec[__index & 7] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_promote(signed int __scalar, int __index) {
+  const __vector signed int __zero = (__vector signed int)0;
+  __vector signed int __vec = __builtin_shufflevector(__zero, __zero,
+                                                      -1, -1, -1, -1);
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_promote(unsigned int __scalar, int __index) {
+  const __vector unsigned int __zero = (__vector unsigned int)0;
+  __vector unsigned int __vec = __builtin_shufflevector(__zero, __zero,
+                                                        -1, -1, -1, -1);
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_promote(signed long long __scalar, int __index) {
+  const __vector signed long long __zero = (__vector signed long long)0;
+  __vector signed long long __vec = __builtin_shufflevector(__zero, __zero,
+                                                            -1, -1);
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_promote(unsigned long long __scalar, int __index) {
+  const __vector unsigned long long __zero = (__vector unsigned long long)0;
+  __vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero,
+                                                              -1, -1);
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_promote(float __scalar, int __index) {
+  const __vector float __zero = (__vector float)0.0f;
+  __vector float __vec = __builtin_shufflevector(__zero, __zero,
+                                                 -1, -1, -1, -1);
+  __vec[__index & 3] = __scalar;
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_promote(double __scalar, int __index) {
+  const __vector double __zero = (__vector double)0.0;
+  __vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1);
+  __vec[__index & 1] = __scalar;
+  return __vec;
+}
+
+/*-- vec_insert_and_zero ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_insert_and_zero(const signed char *__ptr) {
+  __vector signed char __vec = (__vector signed char)0;
+  __vec[7] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_insert_and_zero(const unsigned char *__ptr) {
+  __vector unsigned char __vec = (__vector unsigned char)0;
+  __vec[7] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_insert_and_zero(const signed short *__ptr) {
+  __vector signed short __vec = (__vector signed short)0;
+  __vec[3] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_insert_and_zero(const unsigned short *__ptr) {
+  __vector unsigned short __vec = (__vector unsigned short)0;
+  __vec[3] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_insert_and_zero(const signed int *__ptr) {
+  __vector signed int __vec = (__vector signed int)0;
+  __vec[1] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_insert_and_zero(const unsigned int *__ptr) {
+  __vector unsigned int __vec = (__vector unsigned int)0;
+  __vec[1] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_insert_and_zero(const signed long long *__ptr) {
+  __vector signed long long __vec = (__vector signed long long)0;
+  __vec[0] = *__ptr;
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_insert_and_zero(const unsigned long long *__ptr) {
+  __vector unsigned long long __vec = (__vector unsigned long long)0;
+  __vec[0] = *__ptr;
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_insert_and_zero(const float *__ptr) {
+  __vector float __vec = (__vector float)0.0f;
+  __vec[1] = *__ptr;
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_insert_and_zero(const double *__ptr) {
+  __vector double __vec = (__vector double)0.0;
+  __vec[0] = *__ptr;
+  return __vec;
+}
+
+/*-- vec_perm ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_perm(__vector signed char __a, __vector signed char __b,
+         __vector unsigned char __c) {
+  return (__vector signed char)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_perm(__vector unsigned char __a, __vector unsigned char __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned char)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_perm(__vector __bool char __a, __vector __bool char __b,
+         __vector unsigned char __c) {
+  return (__vector __bool char)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_perm(__vector signed short __a, __vector signed short __b,
+         __vector unsigned char __c) {
+  return (__vector signed short)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_perm(__vector unsigned short __a, __vector unsigned short __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned short)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_perm(__vector __bool short __a, __vector __bool short __b,
+         __vector unsigned char __c) {
+  return (__vector __bool short)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_perm(__vector signed int __a, __vector signed int __b,
+         __vector unsigned char __c) {
+  return (__vector signed int)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_perm(__vector unsigned int __a, __vector unsigned int __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned int)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_perm(__vector __bool int __a, __vector __bool int __b,
+         __vector unsigned char __c) {
+  return (__vector __bool int)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_perm(__vector signed long long __a, __vector signed long long __b,
+         __vector unsigned char __c) {
+  return (__vector signed long long)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_perm(__vector unsigned long long __a, __vector unsigned long long __b,
+         __vector unsigned char __c) {
+  return (__vector unsigned long long)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_perm(__vector __bool long long __a, __vector __bool long long __b,
+         __vector unsigned char __c) {
+  return (__vector __bool long long)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_perm(__vector float __a, __vector float __b,
+         __vector unsigned char __c) {
+  return (__vector float)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_perm(__vector double __a, __vector double __b,
+         __vector unsigned char __c) {
+  return (__vector double)__builtin_s390_vperm(
+           (__vector unsigned char)__a, (__vector unsigned char)__b, __c);
+}
+
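+// Usage sketch (illustrative only, not part of the upstream header): each
+// selector byte picks one of the 32 source bytes, indices 0-15 addressing
+// __a and 16-31 addressing __b. The helper name below is hypothetical.
+static inline __ATTRS_ai __vector unsigned char
+__example_interleave_low(__vector unsigned char __a,
+                         __vector unsigned char __b) {
+  const __vector unsigned char __idx =
+    (__vector unsigned char){ 0, 16, 1, 17, 2, 18, 3, 19,
+                              4, 20, 5, 21, 6, 22, 7, 23 };
+  return vec_perm(__a, __b, __idx);
+}
+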
+/*-- vec_permi --------------------------------------------------------------*/
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector signed long long
+vec_permi(__vector signed long long __a, __vector signed long long __b,
+          int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector unsigned long long
+vec_permi(__vector unsigned long long __a, __vector unsigned long long __b,
+          int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector __bool long long
+vec_permi(__vector __bool long long __a, __vector __bool long long __b,
+          int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector double
+vec_permi(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 3);
+
+#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \
+  __builtin_s390_vpdi((__vector unsigned long long)(X), \
+                      (__vector unsigned long long)(Y), \
+                      (((Z) & 2) << 1) | ((Z) & 1)))
+
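+// Usage sketch (illustrative; note the prototypes above are marked
+// deprecated): the constant 0-3 encodes which doubleword is taken from
+// each operand, bit 1 selecting from the first operand and bit 0 from the
+// second. __example_permi_mix is a hypothetical name.
+static inline __ATTRS_ai __vector unsigned long long
+__example_permi_mix(__vector unsigned long long __x,
+                    __vector unsigned long long __y) {
+  return vec_permi(__x, __y, 1);  // yields { __x[0], __y[1] }
+}
+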
+/*-- vec_bperm_u128 ---------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector unsigned long long
+vec_bperm_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vbperm(__a, __b);
+}
+#endif
+
+/*-- vec_revb ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_revb(__vector signed short __vec) {
+  return (__vector signed short)
+         __builtin_s390_vlbrh((__vector unsigned short)__vec);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_revb(__vector unsigned short __vec) {
+  return __builtin_s390_vlbrh(__vec);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_revb(__vector signed int __vec) {
+  return (__vector signed int)
+         __builtin_s390_vlbrf((__vector unsigned int)__vec);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_revb(__vector unsigned int __vec) {
+  return __builtin_s390_vlbrf(__vec);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_revb(__vector signed long long __vec) {
+  return (__vector signed long long)
+         __builtin_s390_vlbrg((__vector unsigned long long)__vec);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_revb(__vector unsigned long long __vec) {
+  return __builtin_s390_vlbrg(__vec);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_revb(__vector float __vec) {
+  return (__vector float)
+         __builtin_s390_vlbrf((__vector unsigned int)__vec);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_revb(__vector double __vec) {
+  return (__vector double)
+         __builtin_s390_vlbrg((__vector unsigned long long)__vec);
+}
+
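+// Usage sketch (illustrative, hypothetical helper name): vec_revb reverses
+// the bytes within each element, e.g. to reinterpret little-endian 32-bit
+// data on big-endian z/Architecture.
+static inline __ATTRS_ai __vector unsigned int
+__example_bswap_words(__vector unsigned int __v) {
+  return vec_revb(__v);
+}
+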
+/*-- vec_reve ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_reve(__vector signed char __vec) {
+  return (__vector signed char) { __vec[15], __vec[14], __vec[13], __vec[12],
+                                  __vec[11], __vec[10], __vec[9], __vec[8],
+                                  __vec[7], __vec[6], __vec[5], __vec[4],
+                                  __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_reve(__vector unsigned char __vec) {
+  return (__vector unsigned char) { __vec[15], __vec[14], __vec[13], __vec[12],
+                                    __vec[11], __vec[10], __vec[9], __vec[8],
+                                    __vec[7], __vec[6], __vec[5], __vec[4],
+                                    __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_reve(__vector __bool char __vec) {
+  return (__vector __bool char) { __vec[15], __vec[14], __vec[13], __vec[12],
+                                  __vec[11], __vec[10], __vec[9], __vec[8],
+                                  __vec[7], __vec[6], __vec[5], __vec[4],
+                                  __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_reve(__vector signed short __vec) {
+  return (__vector signed short) { __vec[7], __vec[6], __vec[5], __vec[4],
+                                   __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_reve(__vector unsigned short __vec) {
+  return (__vector unsigned short) { __vec[7], __vec[6], __vec[5], __vec[4],
+                                     __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_reve(__vector __bool short __vec) {
+  return (__vector __bool short) { __vec[7], __vec[6], __vec[5], __vec[4],
+                                   __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_reve(__vector signed int __vec) {
+  return (__vector signed int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_reve(__vector unsigned int __vec) {
+  return (__vector unsigned int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_reve(__vector __bool int __vec) {
+  return (__vector __bool int) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_reve(__vector signed long long __vec) {
+  return (__vector signed long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_reve(__vector unsigned long long __vec) {
+  return (__vector unsigned long long) { __vec[1], __vec[0] };
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_reve(__vector __bool long long __vec) {
+  return (__vector __bool long long) { __vec[1], __vec[0] };
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_reve(__vector float __vec) {
+  return (__vector float) { __vec[3], __vec[2], __vec[1], __vec[0] };
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_reve(__vector double __vec) {
+  return (__vector double) { __vec[1], __vec[0] };
+}
+
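+// Usage sketch (illustrative, hypothetical helper name): vec_reve reverses
+// the order of the elements; combined with vec_revb above it reverses all
+// 16 bytes of the register.
+static inline __ATTRS_ai __vector unsigned int
+__example_reverse_bytes(__vector unsigned int __v) {
+  return vec_revb(vec_reve(__v));
+}
+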
+/*-- vec_sel ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+        __vector unsigned char __c) {
+  return (((__vector signed char)__c & __b) |
+          (~(__vector signed char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sel(__vector signed char __a, __vector signed char __b,
+        __vector __bool char __c) {
+  return (((__vector signed char)__c & __b) |
+          (~(__vector signed char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+        __vector unsigned char __c) {
+  return (((__vector __bool char)__c & __b) |
+          (~(__vector __bool char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_sel(__vector __bool char __a, __vector __bool char __b,
+        __vector __bool char __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+        __vector unsigned char __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sel(__vector unsigned char __a, __vector unsigned char __b,
+        __vector __bool char __c) {
+  return (((__vector unsigned char)__c & __b) |
+          (~(__vector unsigned char)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+        __vector unsigned short __c) {
+  return (((__vector signed short)__c & __b) |
+          (~(__vector signed short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sel(__vector signed short __a, __vector signed short __b,
+        __vector __bool short __c) {
+  return (((__vector signed short)__c & __b) |
+          (~(__vector signed short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+        __vector unsigned short __c) {
+  return (((__vector __bool short)__c & __b) |
+          (~(__vector __bool short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_sel(__vector __bool short __a, __vector __bool short __b,
+        __vector __bool short __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+        __vector unsigned short __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sel(__vector unsigned short __a, __vector unsigned short __b,
+        __vector __bool short __c) {
+  return (((__vector unsigned short)__c & __b) |
+          (~(__vector unsigned short)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+        __vector unsigned int __c) {
+  return (((__vector signed int)__c & __b) |
+          (~(__vector signed int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sel(__vector signed int __a, __vector signed int __b,
+        __vector __bool int __c) {
+  return (((__vector signed int)__c & __b) |
+          (~(__vector signed int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+        __vector unsigned int __c) {
+  return (((__vector __bool int)__c & __b) |
+          (~(__vector __bool int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_sel(__vector __bool int __a, __vector __bool int __b,
+        __vector __bool int __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+        __vector unsigned int __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sel(__vector unsigned int __a, __vector unsigned int __b,
+        __vector __bool int __c) {
+  return (((__vector unsigned int)__c & __b) |
+          (~(__vector unsigned int)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+        __vector unsigned long long __c) {
+  return (((__vector signed long long)__c & __b) |
+          (~(__vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sel(__vector signed long long __a, __vector signed long long __b,
+        __vector __bool long long __c) {
+  return (((__vector signed long long)__c & __b) |
+          (~(__vector signed long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+        __vector unsigned long long __c) {
+  return (((__vector __bool long long)__c & __b) |
+          (~(__vector __bool long long)__c & __a));
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sel(__vector __bool long long __a, __vector __bool long long __b,
+        __vector __bool long long __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+        __vector unsigned long long __c) {
+  return (__c & __b) | (~__c & __a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sel(__vector unsigned long long __a, __vector unsigned long long __b,
+        __vector __bool long long __c) {
+  return (((__vector unsigned long long)__c & __b) |
+          (~(__vector unsigned long long)__c & __a));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector unsigned int __c) {
+  return (__vector float)((__c & (__vector unsigned int)__b) |
+                          (~__c & (__vector unsigned int)__a));
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_sel(__vector float __a, __vector float __b, __vector __bool int __c) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  __vector unsigned int __cc = (__vector unsigned int)__c;
+  return (__vector float)((__cc & __bc) | (~__cc & __ac));
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+        __vector unsigned long long __c) {
+  return (__vector double)((__c & (__vector unsigned long long)__b) |
+                           (~__c & (__vector unsigned long long)__a));
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_sel(__vector double __a, __vector double __b,
+        __vector __bool long long __c) {
+  __vector unsigned long long __ac = (__vector unsigned long long)__a;
+  __vector unsigned long long __bc = (__vector unsigned long long)__b;
+  __vector unsigned long long __cc = (__vector unsigned long long)__c;
+  return (__vector double)((__cc & __bc) | (~__cc & __ac));
+}
+
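+// Usage sketch (illustrative, hypothetical helper name): vec_sel is a pure
+// bitwise blend, so with a per-element all-ones/all-zeros mask it acts as
+// a branch-free element select (mask bit set takes __b, clear keeps __a).
+static inline __ATTRS_ai __vector signed int
+__example_blend(__vector signed int __a, __vector signed int __b,
+                __vector unsigned int __mask) {
+  return vec_sel(__a, __b, __mask);
+}
+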
+/*-- vec_gather_element -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed int
+vec_gather_element(__vector signed int __vec,
+                   __vector unsigned int __offset,
+                   const signed int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const signed int *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_gather_element(__vector __bool int __vec,
+                   __vector unsigned int __offset,
+                   const unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const unsigned int *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gather_element(__vector unsigned int __vec,
+                   __vector unsigned int __offset,
+                   const unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const unsigned int *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_gather_element(__vector signed long long __vec,
+                   __vector unsigned long long __offset,
+                   const signed long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const signed long long *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_gather_element(__vector __bool long long __vec,
+                   __vector unsigned long long __offset,
+                   const unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const unsigned long long *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gather_element(__vector unsigned long long __vec,
+                   __vector unsigned long long __offset,
+                   const unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const unsigned long long *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_gather_element(__vector float __vec,
+                   __vector unsigned int __offset,
+                   const float *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  __vec[__index] = *(const float *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_gather_element(__vector double __vec,
+                   __vector unsigned long long __offset,
+                   const double *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  __vec[__index] = *(const double *)(
+    (const char *)__ptr + __offset[__index]);
+  return __vec;
+}
+
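+// Usage sketch (illustrative, hypothetical helper name): the offsets are
+// byte offsets from the base pointer, and the element index must be a
+// constant, so filling a whole vector takes one call per lane.
+static inline __ATTRS_ai __vector double
+__example_gather_pair(const double *__base, unsigned long long __i0,
+                      unsigned long long __i1) {
+  __vector unsigned long long __off =
+    (__vector unsigned long long){ __i0 * sizeof(double),
+                                   __i1 * sizeof(double) };
+  __vector double __v = (__vector double){ 0.0, 0.0 };
+  __v = vec_gather_element(__v, __off, __base, 0);
+  __v = vec_gather_element(__v, __off, __base, 1);
+  return __v;
+}
+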
+/*-- vec_scatter_element ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector signed int __vec,
+                    __vector unsigned int __offset,
+                    signed int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(signed int *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector __bool int __vec,
+                    __vector unsigned int __offset,
+                    unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(unsigned int *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector unsigned int __vec,
+                    __vector unsigned int __offset,
+                    unsigned int *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(unsigned int *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector signed long long __vec,
+                    __vector unsigned long long __offset,
+                    signed long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(signed long long *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector __bool long long __vec,
+                    __vector unsigned long long __offset,
+                    unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(unsigned long long *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector unsigned long long __vec,
+                    __vector unsigned long long __offset,
+                    unsigned long long *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(unsigned long long *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector float __vec,
+                    __vector unsigned int __offset,
+                    float *__ptr, int __index)
+  __constant_range(__index, 0, 3) {
+  *(float *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+#endif
+
+static inline __ATTRS_o_ai void
+vec_scatter_element(__vector double __vec,
+                    __vector unsigned long long __offset,
+                    double *__ptr, int __index)
+  __constant_range(__index, 0, 1) {
+  *(double *)((char *)__ptr + __offset[__index]) =
+    __vec[__index];
+}
+
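+// Usage sketch (illustrative, hypothetical helper name): the mirror image
+// of the gather example above; offsets are again in bytes and the lane
+// index must again be a constant.
+static inline __ATTRS_ai void
+__example_scatter_pair(__vector double __v, double *__base,
+                       unsigned long long __i0, unsigned long long __i1) {
+  __vector unsigned long long __off =
+    (__vector unsigned long long){ __i0 * sizeof(double),
+                                   __i1 * sizeof(double) };
+  vec_scatter_element(__v, __off, __base, 0);
+  vec_scatter_element(__v, __off, __base, 1);
+}
+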
+/*-- vec_xl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_xl(long __offset, const signed char *__ptr) {
+  return *(const __vector signed char *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_xl(long __offset, const unsigned char *__ptr) {
+  return *(const __vector unsigned char *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_xl(long __offset, const signed short *__ptr) {
+  return *(const __vector signed short *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_xl(long __offset, const unsigned short *__ptr) {
+  return *(const __vector unsigned short *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_xl(long __offset, const signed int *__ptr) {
+  return *(const __vector signed int *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_xl(long __offset, const unsigned int *__ptr) {
+  return *(const __vector unsigned int *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_xl(long __offset, const signed long long *__ptr) {
+  return *(const __vector signed long long *)
+          ((const char *)__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_xl(long __offset, const unsigned long long *__ptr) {
+  return *(const __vector unsigned long long *)
+          ((const char *)__ptr + __offset);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_xl(long __offset, const float *__ptr) {
+  return *(const __vector float *)
+          ((const char *)__ptr + __offset);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_xl(long __offset, const double *__ptr) {
+  return *(const __vector double *)
+          ((const char *)__ptr + __offset);
+}
+
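+// Usage sketch (illustrative, hypothetical helper name): vec_xl loads a
+// full 16-byte vector at a byte offset from the pointer.
+static inline __ATTRS_ai void
+__example_load_two(const signed int *__p,
+                   __vector signed int *__v0, __vector signed int *__v1) {
+  *__v0 = vec_xl(0, __p);
+  *__v1 = vec_xl(16, __p);  // the offset is in bytes, not elements
+}
+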
+/*-- vec_xld2 ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_xld2(long __offset, const signed char *__ptr) {
+  return *(const __vector signed char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_xld2(long __offset, const unsigned char *__ptr) {
+  return *(const __vector unsigned char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_xld2(long __offset, const signed short *__ptr) {
+  return *(const __vector signed short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_xld2(long __offset, const unsigned short *__ptr) {
+  return *(const __vector unsigned short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_xld2(long __offset, const signed int *__ptr) {
+  return *(const __vector signed int *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_xld2(long __offset, const unsigned int *__ptr) {
+  return *(const __vector unsigned int *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_xld2(long __offset, const signed long long *__ptr) {
+  return *(const __vector signed long long *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_xld2(long __offset, const unsigned long long *__ptr) {
+  return *(const __vector unsigned long long *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_xld2(long __offset, const double *__ptr) {
+  return *(const __vector double *)
+          ((const char *)__ptr + __offset);
+}
+
+/*-- vec_xlw4 ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_xlw4(long __offset, const signed char *__ptr) {
+  return *(const __vector signed char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_xlw4(long __offset, const unsigned char *__ptr) {
+  return *(const __vector unsigned char *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_xlw4(long __offset, const signed short *__ptr) {
+  return *(const __vector signed short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_xlw4(long __offset, const unsigned short *__ptr) {
+  return *(const __vector unsigned short *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_xlw4(long __offset, const signed int *__ptr) {
+  return *(const __vector signed int *)
+          ((const char *)__ptr + __offset);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_xlw4(long __offset, const unsigned int *__ptr) {
+  return *(const __vector unsigned int *)
+          ((const char *)__ptr + __offset);
+}
+
+/*-- vec_xst ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed char __vec, long __offset, signed char *__ptr) {
+  *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+  *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed short __vec, long __offset, signed short *__ptr) {
+  *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+  *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed int __vec, long __offset, signed int *__ptr) {
+  *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+  *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector signed long long __vec, long __offset,
+          signed long long *__ptr) {
+  *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector unsigned long long __vec, long __offset,
+          unsigned long long *__ptr) {
+  *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai void
+vec_xst(__vector float __vec, long __offset, float *__ptr) {
+  *(__vector float *)((char *)__ptr + __offset) = __vec;
+}
+#endif
+
+static inline __ATTRS_o_ai void
+vec_xst(__vector double __vec, long __offset, double *__ptr) {
+  *(__vector double *)((char *)__ptr + __offset) = __vec;
+}
+
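+// Usage sketch (illustrative, hypothetical helper name): a 16-byte copy
+// expressed as a vec_xl load followed by a vec_xst store.
+static inline __ATTRS_ai void
+__example_copy16(const unsigned char *__src, unsigned char *__dst) {
+  vec_xst(vec_xl(0, __src), 0, __dst);
+}
+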
+/*-- vec_xstd2 --------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed char __vec, long __offset, signed char *__ptr) {
+  *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+  *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed short __vec, long __offset, signed short *__ptr) {
+  *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+  *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed int __vec, long __offset, signed int *__ptr) {
+  *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+  *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector signed long long __vec, long __offset,
+          signed long long *__ptr) {
+  *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector unsigned long long __vec, long __offset,
+          unsigned long long *__ptr) {
+  *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstd2(__vector double __vec, long __offset, double *__ptr) {
+  *(__vector double *)((char *)__ptr + __offset) = __vec;
+}
+
+/*-- vec_xstw4 --------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector signed char __vec, long __offset, signed char *__ptr) {
+  *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
+  *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector signed short __vec, long __offset, signed short *__ptr) {
+  *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
+  *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector signed int __vec, long __offset, signed int *__ptr) {
+  *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai void
+vec_xstw4(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
+  *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+}
+
+/*-- vec_load_bndry ---------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_load_bndry(const signed char *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned char
+vec_load_bndry(const unsigned char *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector signed short
+vec_load_bndry(const signed short *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned short
+vec_load_bndry(const unsigned short *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector signed int
+vec_load_bndry(const signed int *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned int
+vec_load_bndry(const unsigned int *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector signed long long
+vec_load_bndry(const signed long long *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+extern __ATTRS_o __vector unsigned long long
+vec_load_bndry(const unsigned long long *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+#if __ARCH__ >= 12
+extern __ATTRS_o __vector float
+vec_load_bndry(const float *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+#endif
+
+extern __ATTRS_o __vector double
+vec_load_bndry(const double *__ptr, unsigned short __len)
+  __constant_pow2_range(__len, 64, 4096);
+
+#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \
+  __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \
+                            (Y) == 128 ? 1 : \
+                            (Y) == 256 ? 2 : \
+                            (Y) == 512 ? 3 : \
+                            (Y) == 1024 ? 4 : \
+                            (Y) == 2048 ? 5 : \
+                            (Y) == 4096 ? 6 : -1)))
+
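+// Usage sketch (illustrative, hypothetical helper name): the load stops at
+// the next __len-byte boundary, so with __len == 4096 it never reads
+// across a 4 KiB page boundary; __len must be one of the power-of-two
+// constants accepted by the prototypes above.
+static inline __ATTRS_ai __vector unsigned char
+__example_load_to_page_end(const unsigned char *__p) {
+  return vec_load_bndry(__p, 4096);
+}
+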
+/*-- vec_load_len -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_load_len(const signed char *__ptr, unsigned int __len) {
+  return (__vector signed char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_load_len(const unsigned char *__ptr, unsigned int __len) {
+  return (__vector unsigned char)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_load_len(const signed short *__ptr, unsigned int __len) {
+  return (__vector signed short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_load_len(const unsigned short *__ptr, unsigned int __len) {
+  return (__vector unsigned short)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_load_len(const signed int *__ptr, unsigned int __len) {
+  return (__vector signed int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_load_len(const unsigned int *__ptr, unsigned int __len) {
+  return (__vector unsigned int)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_load_len(const signed long long *__ptr, unsigned int __len) {
+  return (__vector signed long long)__builtin_s390_vll(__len, __ptr);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_load_len(const unsigned long long *__ptr, unsigned int __len) {
+  return (__vector unsigned long long)__builtin_s390_vll(__len, __ptr);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_load_len(const float *__ptr, unsigned int __len) {
+  return (__vector float)__builtin_s390_vll(__len, __ptr);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_load_len(const double *__ptr, unsigned int __len) {
+  return (__vector double)__builtin_s390_vll(__len, __ptr);
+}
+
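+// Usage sketch (illustrative, hypothetical helper name): __len is the
+// index of the last byte to load, so passing __n - 1 loads __n bytes
+// (assuming 1 <= __n <= 16). Useful for loop tails that must not read
+// past the end of a buffer.
+static inline __ATTRS_ai __vector unsigned char
+__example_load_tail(const unsigned char *__p, unsigned int __n) {
+  return vec_load_len(__p, __n - 1);
+}
+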
+/*-- vec_load_len_r ---------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector unsigned char
+vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
+  return (__vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
+}
+#endif
+
+/*-- vec_store_len ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed char __vec, signed char *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned char __vec, unsigned char *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed short __vec, signed short *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned short __vec, unsigned short *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed int __vec, signed int *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned int __vec, unsigned int *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector signed long long __vec, signed long long *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector unsigned long long __vec, unsigned long long *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai void
+vec_store_len(__vector float __vec, float *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+#endif
+
+static inline __ATTRS_o_ai void
+vec_store_len(__vector double __vec, double *__ptr,
+              unsigned int __len) {
+  __builtin_s390_vstl((__vector signed char)__vec, __len, __ptr);
+}
+
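+// Usage sketch (illustrative, hypothetical helper name): the counterpart
+// of the vec_load_len example; with __n - 1 as the length argument only
+// the first __n bytes are written (assuming 1 <= __n <= 16).
+static inline __ATTRS_ai void
+__example_store_tail(__vector unsigned char __v, unsigned char *__p,
+                     unsigned int __n) {
+  vec_store_len(__v, __p, __n - 1);
+}
+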
+/*-- vec_store_len_r --------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai void
+vec_store_len_r(__vector unsigned char __vec, unsigned char *__ptr,
+                unsigned int __len) {
+  __builtin_s390_vstrl((__vector signed char)__vec, __len, __ptr);
+}
+#endif
+
+/*-- vec_load_pair ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_load_pair(signed long long __a, signed long long __b) {
+  return (__vector signed long long)(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_load_pair(unsigned long long __a, unsigned long long __b) {
+  return (__vector unsigned long long)(__a, __b);
+}
+
+/*-- vec_genmask ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_genmask(unsigned short __mask)
+  __constant(__mask) {
+  return (__vector unsigned char)(
+    __mask & 0x8000 ? 0xff : 0,
+    __mask & 0x4000 ? 0xff : 0,
+    __mask & 0x2000 ? 0xff : 0,
+    __mask & 0x1000 ? 0xff : 0,
+    __mask & 0x0800 ? 0xff : 0,
+    __mask & 0x0400 ? 0xff : 0,
+    __mask & 0x0200 ? 0xff : 0,
+    __mask & 0x0100 ? 0xff : 0,
+    __mask & 0x0080 ? 0xff : 0,
+    __mask & 0x0040 ? 0xff : 0,
+    __mask & 0x0020 ? 0xff : 0,
+    __mask & 0x0010 ? 0xff : 0,
+    __mask & 0x0008 ? 0xff : 0,
+    __mask & 0x0004 ? 0xff : 0,
+    __mask & 0x0002 ? 0xff : 0,
+    __mask & 0x0001 ? 0xff : 0);
+}
+
+/*-- vec_genmasks_* ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_genmasks_8(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 7;
+  unsigned char __bit2 = __last & 7;
+  unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1;
+  unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1;
+  unsigned char __value = (__bit1 <= __bit2 ?
+                           __mask1 & ~__mask2 :
+                           __mask1 | ~__mask2);
+  return (__vector unsigned char)__value;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_genmasks_16(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 15;
+  unsigned char __bit2 = __last & 15;
+  unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1;
+  unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1;
+  unsigned short __value = (__bit1 <= __bit2 ?
+                            __mask1 & ~__mask2 :
+                            __mask1 | ~__mask2);
+  return (__vector unsigned short)__value;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_genmasks_32(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 31;
+  unsigned char __bit2 = __last & 31;
+  unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1;
+  unsigned int __mask2 = (1U << (31 - __bit2)) - 1;
+  unsigned int __value = (__bit1 <= __bit2 ?
+                          __mask1 & ~__mask2 :
+                          __mask1 | ~__mask2);
+  return (__vector unsigned int)__value;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_genmasks_64(unsigned char __first, unsigned char __last)
+  __constant(__first) __constant(__last) {
+  unsigned char __bit1 = __first & 63;
+  unsigned char __bit2 = __last & 63;
+  unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1;
+  unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1;
+  unsigned long long __value = (__bit1 <= __bit2 ?
+                                __mask1 & ~__mask2 :
+                                __mask1 | ~__mask2);
+  return (__vector unsigned long long)__value;
+}
+
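+// Usage sketch (illustrative, hypothetical helper name): bit positions are
+// counted from the most-significant bit, so vec_genmasks_32(8, 23) sets
+// bits 8 through 23 of every element, i.e. 0x00ffff00.
+static inline __ATTRS_ai __vector unsigned int
+__example_middle_mask(void) {
+  return vec_genmasks_32(8, 23);
+}
+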
+/*-- vec_splat --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_splat(__vector signed char __vec, int __index)
+  __constant_range(__index, 0, 15) {
+  return (__vector signed char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_splat(__vector __bool char __vec, int __index)
+  __constant_range(__index, 0, 15) {
+  return (__vector __bool char)(__vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_splat(__vector unsigned char __vec, int __index)
+  __constant_range(__index, 0, 15) {
+  return (__vector unsigned char)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_splat(__vector signed short __vec, int __index)
+  __constant_range(__index, 0, 7) {
+  return (__vector signed short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_splat(__vector __bool short __vec, int __index)
+  __constant_range(__index, 0, 7) {
+  return (__vector __bool short)(__vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_splat(__vector unsigned short __vec, int __index)
+  __constant_range(__index, 0, 7) {
+  return (__vector unsigned short)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_splat(__vector signed int __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector signed int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_splat(__vector __bool int __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector __bool int)(__vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_splat(__vector unsigned int __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector unsigned int)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_splat(__vector signed long long __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return (__vector signed long long)__vec[__index];
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_splat(__vector __bool long long __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return ((__vector __bool long long)
+          (__vector unsigned long long)__vec[__index]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_splat(__vector unsigned long long __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return (__vector unsigned long long)__vec[__index];
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_splat(__vector float __vec, int __index)
+  __constant_range(__index, 0, 3) {
+  return (__vector float)__vec[__index];
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_splat(__vector double __vec, int __index)
+  __constant_range(__index, 0, 1) {
+  return (__vector double)__vec[__index];
+}
+
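+// Usage sketch (illustrative, hypothetical helper name): broadcast one
+// lane, selected by a constant index, to every element.
+static inline __ATTRS_ai __vector signed int
+__example_broadcast_lane2(__vector signed int __v) {
+  return vec_splat(__v, 2);
+}
+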
+/*-- vec_splat_s* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector signed char
+vec_splat_s8(signed char __scalar)
+  __constant(__scalar) {
+  return (__vector signed char)__scalar;
+}
+
+static inline __ATTRS_ai __vector signed short
+vec_splat_s16(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector signed short)__scalar;
+}
+
+static inline __ATTRS_ai __vector signed int
+vec_splat_s32(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector signed int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai __vector signed long long
+vec_splat_s64(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector signed long long)(signed long)__scalar;
+}
+
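+// Usage sketch (illustrative, hypothetical helper name): the signed short
+// parameter on the 32- and 64-bit variants presumably reflects the 16-bit
+// signed immediate range of the underlying replicate-immediate operation;
+// the constant is sign-extended to the element width.
+static inline __ATTRS_ai __vector signed int
+__example_all_minus_one(void) {
+  return vec_splat_s32(-1);  // four 32-bit elements, each -1
+}
+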
+/*-- vec_splat_u* -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_splat_u8(unsigned char __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_ai __vector unsigned short
+vec_splat_u16(unsigned short __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_ai __vector unsigned int
+vec_splat_u32(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned int)(signed int)__scalar;
+}
+
+static inline __ATTRS_ai __vector unsigned long long
+vec_splat_u64(signed short __scalar)
+  __constant(__scalar) {
+  return (__vector unsigned long long)(signed long long)__scalar;
+}
+
+/*-- vec_splats -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_splats(signed char __scalar) {
+  return (__vector signed char)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_splats(unsigned char __scalar) {
+  return (__vector unsigned char)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_splats(signed short __scalar) {
+  return (__vector signed short)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_splats(unsigned short __scalar) {
+  return (__vector unsigned short)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_splats(signed int __scalar) {
+  return (__vector signed int)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_splats(unsigned int __scalar) {
+  return (__vector unsigned int)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_splats(signed long long __scalar) {
+  return (__vector signed long long)__scalar;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_splats(unsigned long long __scalar) {
+  return (__vector unsigned long long)__scalar;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_splats(float __scalar) {
+  return (__vector float)__scalar;
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_splats(double __scalar) {
+  return (__vector double)__scalar;
+}
+
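+// Usage sketch (illustrative, hypothetical helper name): unlike the
+// vec_splat_* forms above, which require a constant, vec_splats
+// replicates an arbitrary run-time scalar.
+static inline __ATTRS_ai __vector double
+__example_fill(double __x) {
+  return vec_splats(__x);
+}
+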
+/*-- vec_extend_s64 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed char __a) {
+  return (__vector signed long long)(__a[7], __a[15]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed short __a) {
+  return (__vector signed long long)(__a[3], __a[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_extend_s64(__vector signed int __a) {
+  return (__vector signed long long)(__a[1], __a[3]);
+}
+
+/*-- vec_mergeh -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mergeh(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergeh(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergeh(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector unsigned char)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3],
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mergeh(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergeh(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergeh(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)(
+    __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mergeh(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergeh(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergeh(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)(__a[0], __b[0], __a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergeh(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergeh(__vector __bool long long __a, __vector __bool long long __b) {
+  return (__vector __bool long long)(__a[0], __b[0]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergeh(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)(__a[0], __b[0]);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_mergeh(__vector float __a, __vector float __b) {
+  return (__vector float)(__a[0], __b[0], __a[1], __b[1]);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_mergeh(__vector double __a, __vector double __b) {
+  return (__vector double)(__a[0], __b[0]);
+}
+
+/*-- vec_mergel -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mergel(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)(
+    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_mergel(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)(
+    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mergel(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector unsigned char)(
+    __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11],
+    __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mergel(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)(
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_mergel(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)(
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mergel(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)(
+    __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mergel(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_mergel(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mergel(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)(__a[2], __b[2], __a[3], __b[3]);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mergel(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_mergel(__vector __bool long long __a, __vector __bool long long __b) {
+  return (__vector __bool long long)(__a[1], __b[1]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mergel(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)(__a[1], __b[1]);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_mergel(__vector float __a, __vector float __b) {
+  return (__vector float)(__a[2], __b[2], __a[3], __b[3]);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_mergel(__vector double __a, __vector double __b) {
+  return (__vector double)(__a[1], __b[1]);
+}
+
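+// Usage sketch (illustrative, hypothetical helper name): vec_mergeh and
+// vec_mergel interleave the high and low halves of two vectors, a common
+// building block for in-register transposes.
+static inline __ATTRS_ai void
+__example_interleave_words(__vector unsigned int __a,
+                           __vector unsigned int __b,
+                           __vector unsigned int *__hi,
+                           __vector unsigned int *__lo) {
+  *__hi = vec_mergeh(__a, __b);  // { __a[0], __b[0], __a[1], __b[1] }
+  *__lo = vec_mergel(__a, __b);  // { __a[2], __b[2], __a[3], __b[3] }
+}
+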
+/*-- vec_pack ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_pack(__vector signed short __a, __vector signed short __b) {
+  __vector signed char __ac = (__vector signed char)__a;
+  __vector signed char __bc = (__vector signed char)__b;
+  return (__vector signed char)(
+    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_pack(__vector __bool short __a, __vector __bool short __b) {
+  __vector __bool char __ac = (__vector __bool char)__a;
+  __vector __bool char __bc = (__vector __bool char)__b;
+  return (__vector __bool char)(
+    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_pack(__vector unsigned short __a, __vector unsigned short __b) {
+  __vector unsigned char __ac = (__vector unsigned char)__a;
+  __vector unsigned char __bc = (__vector unsigned char)__b;
+  return (__vector unsigned char)(
+    __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15],
+    __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_pack(__vector signed int __a, __vector signed int __b) {
+  __vector signed short __ac = (__vector signed short)__a;
+  __vector signed short __bc = (__vector signed short)__b;
+  return (__vector signed short)(
+    __ac[1], __ac[3], __ac[5], __ac[7],
+    __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_pack(__vector __bool int __a, __vector __bool int __b) {
+  __vector __bool short __ac = (__vector __bool short)__a;
+  __vector __bool short __bc = (__vector __bool short)__b;
+  return (__vector __bool short)(
+    __ac[1], __ac[3], __ac[5], __ac[7],
+    __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_pack(__vector unsigned int __a, __vector unsigned int __b) {
+  __vector unsigned short __ac = (__vector unsigned short)__a;
+  __vector unsigned short __bc = (__vector unsigned short)__b;
+  return (__vector unsigned short)(
+    __ac[1], __ac[3], __ac[5], __ac[7],
+    __bc[1], __bc[3], __bc[5], __bc[7]);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_pack(__vector signed long long __a, __vector signed long long __b) {
+  __vector signed int __ac = (__vector signed int)__a;
+  __vector signed int __bc = (__vector signed int)__b;
+  return (__vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_pack(__vector __bool long long __a, __vector __bool long long __b) {
+  __vector __bool int __ac = (__vector __bool int)__a;
+  __vector __bool int __bc = (__vector __bool int)__b;
+  return (__vector __bool int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_pack(__vector unsigned long long __a, __vector unsigned long long __b) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  return (__vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]);
+}
+
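+/* Usage sketch (illustrative, compiled out): vec_pack narrows each element
+   to half its width with modular truncation and concatenates __a and __b.
+   The odd-indexed narrow subelements selected above are the low-order
+   halves, since z/Architecture vector elements are big-endian. */
+#if 0
+static inline __vector signed char
+__sketch_pack(void) {
+  __vector signed short __a = { 0x0102, 0x0304, 0x0506, 0x0708,
+                                0x090A, 0x0B0C, 0x0D0E, 0x0F10 };
+  /* Result starts 0x02, 0x04, 0x06, ...: the low byte of each halfword. */
+  return vec_pack(__a, __a);
+}
+#endif
+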
+/*-- vec_packs --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_packs(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vpksh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_packs(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vpksf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_packs(__vector signed long long __a, __vector signed long long __b) {
+  return __builtin_s390_vpksg(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vpklsg(__a, __b);
+}
+
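+/* Usage sketch (illustrative, compiled out): unlike vec_pack, the
+   vec_packs family saturates out-of-range elements instead of truncating
+   them, e.g. 300 narrows to 127 as signed char. */
+#if 0
+static inline __vector signed char
+__sketch_packs(__vector signed short __a, __vector signed short __b) {
+  /* Each 16-bit element is clamped to [-128, 127] before narrowing. */
+  return vec_packs(__a, __b);
+}
+#endif
+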
+/*-- vec_packs_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_packs_cc(__vector signed short __a, __vector signed short __b, int *__cc) {
+  return __builtin_s390_vpkshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packs_cc(__vector unsigned short __a, __vector unsigned short __b,
+             int *__cc) {
+  return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_packs_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+  return __builtin_s390_vpksfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packs_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_packs_cc(__vector signed long long __a, __vector signed long long __b,
+             int *__cc) {
+  return __builtin_s390_vpksgs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packs_cc(__vector unsigned long long __a, __vector unsigned long long __b,
+             int *__cc) {
+  return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
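+/* Usage sketch (illustrative, compiled out): the _cc variants additionally
+   return the instruction's condition code through *__cc.  __cc == 0 means
+   no element saturated; the nonzero encodings are not restated here, see
+   the z/Architecture Principles of Operation. */
+#if 0
+static inline int
+__sketch_packs_cc(__vector signed short __a, __vector signed short __b,
+                  __vector signed char *__out) {
+  int __cc;
+  *__out = vec_packs_cc(__a, __b, &__cc);
+  return __cc == 0;  /* true iff no element was saturated */
+}
+#endif
+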
+/*-- vec_packsu -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector signed short __a, __vector signed short __b) {
+  const __vector signed short __zero = (__vector signed short)0;
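+  /* (Editorial comment:) (__a >= __zero) yields an all-ones mask in
+     non-negative lanes; the bitwise AND clamps negative lanes to zero
+     before the unsigned saturating pack.  The int and long long variants
+     below use the same trick. */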
+  return __builtin_s390_vpklsh(
+    (__vector unsigned short)(__a >= __zero) & (__vector unsigned short)__a,
+    (__vector unsigned short)(__b >= __zero) & (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vpklsh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector signed int __a, __vector signed int __b) {
+  const __vector signed int __zero = (__vector signed int)0;
+  return __builtin_s390_vpklsf(
+    (__vector unsigned int)(__a >= __zero) & (__vector unsigned int)__a,
+    (__vector unsigned int)(__b >= __zero) & (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vpklsf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector signed long long __a, __vector signed long long __b) {
+  const __vector signed long long __zero = (__vector signed long long)0;
+  return __builtin_s390_vpklsg(
+    (__vector unsigned long long)(__a >= __zero) &
+    (__vector unsigned long long)__a,
+    (__vector unsigned long long)(__b >= __zero) &
+    (__vector unsigned long long)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vpklsg(__a, __b);
+}
+
+/*-- vec_packsu_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_packsu_cc(__vector unsigned short __a, __vector unsigned short __b,
+              int *__cc) {
+  return __builtin_s390_vpklshs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_packsu_cc(__vector unsigned int __a, __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vpklsfs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_packsu_cc(__vector unsigned long long __a, __vector unsigned long long __b,
+              int *__cc) {
+  return __builtin_s390_vpklsgs(__a, __b, __cc);
+}
+
+/*-- vec_unpackh ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackh(__vector signed char __a) {
+  return __builtin_s390_vuphb(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackh(__vector __bool char __a) {
+  return ((__vector __bool short)
+          __builtin_s390_vuphb((__vector signed char)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackh(__vector unsigned char __a) {
+  return __builtin_s390_vuplhb(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackh(__vector signed short __a) {
+  return __builtin_s390_vuphh(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackh(__vector __bool short __a) {
+  return ((__vector __bool int)
+          __builtin_s390_vuphh((__vector signed short)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackh(__vector unsigned short __a) {
+  return __builtin_s390_vuplhh(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackh(__vector signed int __a) {
+  return __builtin_s390_vuphf(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackh(__vector __bool int __a) {
+  return ((__vector __bool long long)
+          __builtin_s390_vuphf((__vector signed int)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackh(__vector unsigned int __a) {
+  return __builtin_s390_vuplhf(__a);
+}
+
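+/* Usage sketch (illustrative, compiled out): vec_unpackh widens the high
+   (leftmost) half of the vector, sign-extending signed elements and
+   zero-extending unsigned ones. */
+#if 0
+static inline __vector signed int
+__sketch_unpackh(void) {
+  __vector signed short __a = { -1, 2, -3, 4, 5, 6, 7, 8 };
+  return vec_unpackh(__a);  /* { -1, 2, -3, 4 } as 32-bit ints */
+}
+#endif
+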
+/*-- vec_unpackl ------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_unpackl(__vector signed char __a) {
+  return __builtin_s390_vuplb(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_unpackl(__vector __bool char __a) {
+  return ((__vector __bool short)
+          __builtin_s390_vuplb((__vector signed char)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_unpackl(__vector unsigned char __a) {
+  return __builtin_s390_vupllb(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_unpackl(__vector signed short __a) {
+  return __builtin_s390_vuplhw(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_unpackl(__vector __bool short __a) {
+  return ((__vector __bool int)
+          __builtin_s390_vuplhw((__vector signed short)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unpackl(__vector unsigned short __a) {
+  return __builtin_s390_vupllh(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_unpackl(__vector signed int __a) {
+  return __builtin_s390_vuplf(__a);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_unpackl(__vector __bool int __a) {
+  return ((__vector __bool long long)
+          __builtin_s390_vuplf((__vector signed int)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unpackl(__vector unsigned int __a) {
+  return __builtin_s390_vupllf(__a);
+}
+
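+/* Note (editorial): the builtin spelling __builtin_s390_vuplhw used above
+   is the signed unpack-low for halfwords; the trailing "hw" keeps it
+   distinct from the vuplh* names, which are the logical unpack-high
+   builtins used by vec_unpackh.  Usage sketch (illustrative, compiled
+   out): */
+#if 0
+static inline __vector signed int
+__sketch_unpackl(void) {
+  __vector signed short __a = { 1, 2, 3, 4, -5, 6, -7, 8 };
+  return vec_unpackl(__a);  /* { -5, 6, -7, 8 } as 32-bit ints */
+}
+#endif
+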
+/*-- vec_cmpeq --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpeq(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpeq(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector __bool long long __a, __vector __bool long long __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpeq(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a == __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpeq(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a == __b);
+}
+
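+/* Usage sketch (illustrative, compiled out): the vec_cmp* routines return
+   per-element masks (all ones for true, all zeros for false) that feed the
+   selection and logical operations defined elsewhere in this header, such
+   as vec_sel. */
+#if 0
+static inline __vector signed int
+__sketch_cmpeq_select(__vector signed int __x, __vector signed int __y) {
+  __vector __bool int __m = vec_cmpeq(__x, __y);
+  /* Keep lanes of __x where equal, otherwise take lanes of __y. */
+  return vec_sel(__y, __x, __m);
+}
+#endif
+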
+/*-- vec_cmpge --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpge(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpge(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a >= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a >= __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpge(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a >= __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpge(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a >= __b);
+}
+
+/*-- vec_cmpgt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpgt(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpgt(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a > __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a > __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpgt(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a > __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmpgt(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a > __b);
+}
+
+/*-- vec_cmple --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmple(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmple(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a <= __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a <= __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmple(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a <= __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmple(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a <= __b);
+}
+
+/*-- vec_cmplt --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmplt(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmplt(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector __bool long long)(__a < __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector __bool long long)(__a < __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmplt(__vector float __a, __vector float __b) {
+  return (__vector __bool int)(__a < __b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_cmplt(__vector double __a, __vector double __b) {
+  return (__vector __bool long long)(__a < __b);
+}
+
+/*-- vec_all_eq -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_eq(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
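+/* Usage sketch (illustrative, compiled out): the vec_all_* predicates
+   reduce a compare-and-set-condition-code instruction to a scalar truth
+   value: __cc == 0 means the compare held for every element, __cc == 3
+   means it held for none. */
+#if 0
+static inline int
+__sketch_all_eq(__vector signed int __x, __vector signed int __y) {
+  return vec_all_eq(__x, __y);  /* 1 iff all four elements are equal */
+}
+#endif
+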
+/*-- vec_all_ne -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_ne(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_ge -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc == 3;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_ge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
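+/* Note (editorial): the integer variants above have no native >= compare,
+   so they swap the operands of the > compare and test the condition code
+   for "true for no element": all(__a >= __b) is computed as
+   none(__b > __a), i.e. __cc == 3.  The float variants use the dedicated
+   compare-high-or-equal instruction and test __cc == 0 directly.  The
+   same pattern recurs in vec_all_gt, vec_all_le and vec_all_lt below. */
+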
+/*-- vec_all_gt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc == 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_gt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_le -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc == 3;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_le(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc == 3;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_le(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_le(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_lt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc == 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc == 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc == 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_lt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_nge ------------------------------------------------------------*/
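+// The negated all-predicates (nge, ngt, nle, nlt) check that the positive
+// comparison holds for no element (CC 3). Note that for floating-point
+// inputs this is not the same as the opposite positive predicate: a NaN
+// element compares false either way, so e.g. vec_all_nge can be true where
+// vec_all_lt is not.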
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_ngt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_ngt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_ngt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_nle ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nle(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nle(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_nlt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nlt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nlt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_all_nan ------------------------------------------------------------*/
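+// Test-data-class with mask 15 selects the NaN classes (+/- quiet and
+// signaling NaN); CC 0 means every element matched, i.e. is a NaN.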
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_nan(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc == 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_nan(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc == 0;
+}
+
+/*-- vec_all_numeric --------------------------------------------------------*/
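+// Same class test as vec_all_nan, but CC 3 (no element is a NaN) is
+// checked, i.e. all elements are numeric.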
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_all_numeric(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc == 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_all_numeric(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc == 3;
+}
+
+/*-- vec_any_eq -------------------------------------------------------------*/
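+// For the vec_any_* family, CC <= 1 means the comparison held for at least
+// one element (CC 0: all elements, CC 1: some but not all). For example,
+// with hypothetical values:
+//   __vector signed int x = {1, 2, 3, 4};
+//   __vector signed int y = {9, 2, 9, 9};
+//   vec_any_eq(x, y)   // returns 1: the second lanes are equal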
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_eq(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+/*-- vec_any_ne -------------------------------------------------------------*/
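+// "Any not-equal" is the complement of "all equal": CC != 0 on the
+// equality comparison means at least one element differed.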
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vceqbs((__vector signed char)__a,
+                        (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vceqhs((__vector signed short)__a,
+                        (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vceqfs((__vector signed int)__a,
+                        (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vceqgs((__vector signed long long)__a,
+                        (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfcesbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_ne(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfcedbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_ge -------------------------------------------------------------*/
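+// "Any a >= b" is the complement of "all b > a", so the operands are
+// swapped and CC != 0 (not true for all elements) is checked.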
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc != 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_ge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+/*-- vec_any_gt -------------------------------------------------------------*/
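+// a > b maps directly onto the compare-greater builtins; CC <= 1 means the
+// comparison held for at least one element.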
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc <= 1;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_gt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc <= 1;
+}
+
+/*-- vec_any_le -------------------------------------------------------------*/
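+// "Any a <= b" is the complement of "all a > b": CC != 0 on the a > b
+// comparison.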
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__a, (__vector signed char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__a, (__vector unsigned char)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__a,
+                        (__vector unsigned char)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__a, (__vector signed short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__a, (__vector unsigned short)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__a,
+                        (__vector unsigned short)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__a, (__vector signed int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__a, (__vector unsigned int)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__a,
+                        (__vector unsigned int)__b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__a, (__vector signed long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__a, (__vector unsigned long long)__b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a, __b, &__cc);
+  return __cc != 0;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_le(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__a,
+                        (__vector unsigned long long)__b, &__cc);
+  return __cc != 0;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_le(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_le(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+/*-- vec_any_lt -------------------------------------------------------------*/
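+// As with vec_all_lt, a < b is tested as b > a with swapped operands;
+// CC <= 1 means at least one element satisfied it.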
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchbs((__vector signed char)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool char __a, __vector signed char __b) {
+  int __cc;
+  __builtin_s390_vchbs(__b, (__vector signed char)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool char __a, __vector unsigned char __b) {
+  int __cc;
+  __builtin_s390_vchlbs(__b, (__vector unsigned char)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool char __a, __vector __bool char __b) {
+  int __cc;
+  __builtin_s390_vchlbs((__vector unsigned char)__b,
+                        (__vector unsigned char)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchhs((__vector signed short)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool short __a, __vector signed short __b) {
+  int __cc;
+  __builtin_s390_vchhs(__b, (__vector signed short)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool short __a, __vector unsigned short __b) {
+  int __cc;
+  __builtin_s390_vchlhs(__b, (__vector unsigned short)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool short __a, __vector __bool short __b) {
+  int __cc;
+  __builtin_s390_vchlhs((__vector unsigned short)__b,
+                        (__vector unsigned short)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchfs((__vector signed int)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool int __a, __vector signed int __b) {
+  int __cc;
+  __builtin_s390_vchfs(__b, (__vector signed int)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool int __a, __vector unsigned int __b) {
+  int __cc;
+  __builtin_s390_vchlfs(__b, (__vector unsigned int)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool int __a, __vector __bool int __b) {
+  int __cc;
+  __builtin_s390_vchlfs((__vector unsigned int)__b,
+                        (__vector unsigned int)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector signed long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchgs((__vector signed long long)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool long long __a, __vector signed long long __b) {
+  int __cc;
+  __builtin_s390_vchgs(__b, (__vector signed long long)__a, &__cc);
+  return __cc <= 1;
+}
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector unsigned long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool long long __a, __vector unsigned long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs(__b, (__vector unsigned long long)__a, &__cc);
+  return __cc <= 1;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector __bool long long __a, __vector __bool long long __b) {
+  int __cc;
+  __builtin_s390_vchlgs((__vector unsigned long long)__b,
+                        (__vector unsigned long long)__a, &__cc);
+  return __cc <= 1;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_lt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc <= 1;
+}
+
+/*-- vec_any_nge ------------------------------------------------------------*/
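+// The negated any-predicates (nge, ngt, nle, nlt) are the complements of
+// the corresponding all-predicates: CC != 0 on the positive comparison
+// means it failed for at least one element.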
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nge(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nge(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_ngt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_ngt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_ngt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__a, __b, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_nle ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nle(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchesbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nle(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchedbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_nlt ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nlt(__vector float __a, __vector float __b) {
+  int __cc;
+  __builtin_s390_vfchsbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nlt(__vector double __a, __vector double __b) {
+  int __cc;
+  __builtin_s390_vfchdbs(__b, __a, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_any_nan ------------------------------------------------------------*/
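+// At least one element is a NaN: the class test did not report "no element
+// matched" (CC != 3).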
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_nan(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc != 3;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_nan(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc != 3;
+}
+
+/*-- vec_any_numeric --------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_any_numeric(__vector float __a) {
+  int __cc;
+  __builtin_s390_vftcisb(__a, 15, &__cc);
+  return __cc != 0;
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_any_numeric(__vector double __a) {
+  int __cc;
+  __builtin_s390_vftcidb(__a, 15, &__cc);
+  return __cc != 0;
+}
+
+/*-- vec_andc ---------------------------------------------------------------*/
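+// vec_andc computes a & ~b elementwise ("and with complement"). The
+// floating-point overloads below operate on the underlying integer bit
+// pattern, e.g. to mask out bits selected by b.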
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_andc(__vector __bool char __a, __vector __bool char __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector signed char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector __bool char __a, __vector signed char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_andc(__vector signed char __a, __vector __bool char __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector __bool char __a, __vector unsigned char __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_andc(__vector unsigned char __a, __vector __bool char __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_andc(__vector __bool short __a, __vector __bool short __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector signed short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector __bool short __a, __vector signed short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_andc(__vector signed short __a, __vector __bool short __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector __bool short __a, __vector unsigned short __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_andc(__vector unsigned short __a, __vector __bool short __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_andc(__vector __bool int __a, __vector __bool int __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector signed int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector __bool int __a, __vector signed int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_andc(__vector signed int __a, __vector __bool int __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector __bool int __a, __vector unsigned int __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_andc(__vector unsigned int __a, __vector __bool int __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_andc(__vector __bool long long __a, __vector __bool long long __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector signed long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector __bool long long __a, __vector signed long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_andc(__vector signed long long __a, __vector __bool long long __b) {
+  return __a & ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector __bool long long __a, __vector unsigned long long __b) {
+  return __a & ~__b;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_andc(__vector unsigned long long __a, __vector __bool long long __b) {
+  return __a & ~__b;
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_andc(__vector float __a, __vector float __b) {
+  return (__vector float)((__vector unsigned int)__a &
+                         ~(__vector unsigned int)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector double __b) {
+  return (__vector double)((__vector unsigned long long)__a &
+                         ~(__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector __bool long long __a, __vector double __b) {
+  return (__vector double)((__vector unsigned long long)__a &
+                         ~(__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_andc(__vector double __a, __vector __bool long long __b) {
+  return (__vector double)((__vector unsigned long long)__a &
+                         ~(__vector unsigned long long)__b);
+}
+
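+/* [Editorial sketch, not part of the upstream header; the helper name is
+ * hypothetical.] As the overloads above show, vec_andc(a, b) computes
+ * a & ~b element-wise, which is handy for clearing in a the bits set in
+ * a mask b: */
+static inline __vector unsigned int
+__editorial_andc_demo(__vector unsigned int __val, __vector unsigned int __mask) {
+  return vec_andc(__val, __mask);  /* each element: __val[i] & ~__mask[i] */
+}
+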
+/*-- vec_nor ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_nor(__vector __bool char __a, __vector __bool char __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector signed char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector __bool char __a, __vector signed char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_nor(__vector signed char __a, __vector __bool char __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector unsigned char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector __bool char __a, __vector unsigned char __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nor(__vector unsigned char __a, __vector __bool char __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_nor(__vector __bool short __a, __vector __bool short __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector signed short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector __bool short __a, __vector signed short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_nor(__vector signed short __a, __vector __bool short __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector unsigned short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector __bool short __a, __vector unsigned short __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nor(__vector unsigned short __a, __vector __bool short __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_nor(__vector __bool int __a, __vector __bool int __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector signed int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector __bool int __a, __vector signed int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_nor(__vector signed int __a, __vector __bool int __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector unsigned int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector __bool int __a, __vector unsigned int __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nor(__vector unsigned int __a, __vector __bool int __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nor(__vector __bool long long __a, __vector __bool long long __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector signed long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector __bool long long __a, __vector signed long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_nor(__vector signed long long __a, __vector __bool long long __b) {
+  return ~(__a | __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector __bool long long __a, __vector unsigned long long __b) {
+  return ~(__a | __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nor(__vector unsigned long long __a, __vector __bool long long __b) {
+  return ~(__a | __b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nor(__vector float __a, __vector float __b) {
+  return (__vector float)~((__vector unsigned int)__a |
+                         (__vector unsigned int)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a |
+                          (__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector __bool long long __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a |
+                          (__vector unsigned long long)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_nor(__vector double __a, __vector __bool long long __b) {
+  return (__vector double)~((__vector unsigned long long)__a |
+                          (__vector unsigned long long)__b);
+}
+
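+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_nor(a, b) yields ~(a | b) element-wise; vec_nor(a, a) is a common
+ * idiom for a full bitwise complement: */
+static inline __vector unsigned char
+__editorial_nor_demo(__vector unsigned char __a) {
+  return vec_nor(__a, __a);  /* ~(__a | __a) == ~__a per element */
+}
+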
+/*-- vec_orc ----------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool char
+vec_orc(__vector __bool char __a, __vector __bool char __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_orc(__vector signed char __a, __vector signed char __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_orc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_orc(__vector __bool short __a, __vector __bool short __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_orc(__vector signed short __a, __vector signed short __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_orc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_orc(__vector __bool int __a, __vector __bool int __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_orc(__vector signed int __a, __vector signed int __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_orc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_orc(__vector __bool long long __a, __vector __bool long long __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_orc(__vector signed long long __a, __vector signed long long __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_orc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __a | ~__b;
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_orc(__vector float __a, __vector float __b) {
+  return (__vector float)((__vector unsigned int)__a |
+                        ~(__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_orc(__vector double __a, __vector double __b) {
+  return (__vector double)((__vector unsigned long long)__a |
+                         ~(__vector unsigned long long)__b);
+}
+#endif
+
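+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * On __ARCH__ >= 12 targets (e.g. -march=z14), vec_orc(a, b) computes
+ * a | ~b in one operation: */
+#if __ARCH__ >= 12
+static inline __vector unsigned int
+__editorial_orc_demo(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_orc(__a, __b);  /* each element: __a[i] | ~__b[i] */
+}
+#endif
+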
+/*-- vec_nand ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool char
+vec_nand(__vector __bool char __a, __vector __bool char __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_nand(__vector signed char __a, __vector signed char __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_nand(__vector unsigned char __a, __vector unsigned char __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_nand(__vector __bool short __a, __vector __bool short __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_nand(__vector signed short __a, __vector signed short __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_nand(__vector unsigned short __a, __vector unsigned short __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_nand(__vector __bool int __a, __vector __bool int __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_nand(__vector signed int __a, __vector signed int __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_nand(__vector unsigned int __a, __vector unsigned int __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_nand(__vector __bool long long __a, __vector __bool long long __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_nand(__vector signed long long __a, __vector signed long long __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_nand(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return ~(__a & __b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_nand(__vector float __a, __vector float __b) {
+  return (__vector float)~((__vector unsigned int)__a &
+                         (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_nand(__vector double __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a &
+                          (__vector unsigned long long)__b);
+}
+#endif
+
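+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_nand(a, b) computes ~(a & b); like vec_nor(a, a), vec_nand(a, a)
+ * also yields the bitwise complement of a: */
+#if __ARCH__ >= 12
+static inline __vector unsigned int
+__editorial_nand_demo(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_nand(__a, __b);  /* each element: ~(__a[i] & __b[i]) */
+}
+#endif
+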
+/*-- vec_eqv ----------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector __bool char
+vec_eqv(__vector __bool char __a, __vector __bool char __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_eqv(__vector signed char __a, __vector signed char __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_eqv(__vector unsigned char __a, __vector unsigned char __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_eqv(__vector __bool short __a, __vector __bool short __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_eqv(__vector signed short __a, __vector signed short __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_eqv(__vector unsigned short __a, __vector unsigned short __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_eqv(__vector __bool int __a, __vector __bool int __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_eqv(__vector signed int __a, __vector signed int __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_eqv(__vector unsigned int __a, __vector unsigned int __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector __bool long long
+vec_eqv(__vector __bool long long __a, __vector __bool long long __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_eqv(__vector signed long long __a, __vector signed long long __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_eqv(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return ~(__a ^ __b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_eqv(__vector float __a, __vector float __b) {
+  return (__vector float)~((__vector unsigned int)__a ^
+                         (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_eqv(__vector double __a, __vector double __b) {
+  return (__vector double)~((__vector unsigned long long)__a ^
+                          (__vector unsigned long long)__b);
+}
+#endif
+
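+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_eqv(a, b) computes ~(a ^ b), i.e. a result bit is set exactly where
+ * a and b agree: */
+#if __ARCH__ >= 12
+static inline __vector unsigned int
+__editorial_eqv_demo(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_eqv(__a, __b);  /* each element: ~(__a[i] ^ __b[i]) */
+}
+#endif
+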
+/*-- vec_cntlz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector signed char __a) {
+  return __builtin_s390_vclzb((__vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cntlz(__vector unsigned char __a) {
+  return __builtin_s390_vclzb(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector signed short __a) {
+  return __builtin_s390_vclzh((__vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cntlz(__vector unsigned short __a) {
+  return __builtin_s390_vclzh(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector signed int __a) {
+  return __builtin_s390_vclzf((__vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cntlz(__vector unsigned int __a) {
+  return __builtin_s390_vclzf(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector signed long long __a) {
+  return __builtin_s390_vclzg((__vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cntlz(__vector unsigned long long __a) {
+  return __builtin_s390_vclzg(__a);
+}
+
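+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_cntlz counts the leading zero bits of each element; for an all-zero
+ * element the count is the element width in bits: */
+static inline __vector unsigned int
+__editorial_cntlz_demo(__vector unsigned int __a) {
+  return vec_cntlz(__a);  /* e.g. element 1u -> 31, element 0u -> 32 */
+}
+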
+/*-- vec_cnttz --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector signed char __a) {
+  return __builtin_s390_vctzb((__vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cnttz(__vector unsigned char __a) {
+  return __builtin_s390_vctzb(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector signed short __a) {
+  return __builtin_s390_vctzh((__vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cnttz(__vector unsigned short __a) {
+  return __builtin_s390_vctzh(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector signed int __a) {
+  return __builtin_s390_vctzf((__vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cnttz(__vector unsigned int __a) {
+  return __builtin_s390_vctzf(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector signed long long __a) {
+  return __builtin_s390_vctzg((__vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_cnttz(__vector unsigned long long __a) {
+  return __builtin_s390_vctzg(__a);
+}
+
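+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_cnttz is the trailing-zero counterpart of vec_cntlz above: */
+static inline __vector unsigned int
+__editorial_cnttz_demo(__vector unsigned int __a) {
+  return vec_cnttz(__a);  /* e.g. element 8u -> 3, element 0u -> 32 */
+}
+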
+/*-- vec_popcnt -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector signed char __a) {
+  return __builtin_s390_vpopctb((__vector unsigned char)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_popcnt(__vector unsigned char __a) {
+  return __builtin_s390_vpopctb(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector signed short __a) {
+  return __builtin_s390_vpopcth((__vector unsigned short)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_popcnt(__vector unsigned short __a) {
+  return __builtin_s390_vpopcth(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector signed int __a) {
+  return __builtin_s390_vpopctf((__vector unsigned int)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_popcnt(__vector unsigned int __a) {
+  return __builtin_s390_vpopctf(__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector signed long long __a) {
+  return __builtin_s390_vpopctg((__vector unsigned long long)__a);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_popcnt(__vector unsigned long long __a) {
+  return __builtin_s390_vpopctg(__a);
+}
+
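+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_popcnt returns the number of one-bits in each element: */
+static inline __vector unsigned char
+__editorial_popcnt_demo(__vector unsigned char __a) {
+  return vec_popcnt(__a);  /* e.g. element 0xF0 -> 4 */
+}
+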
+/*-- vec_rl -----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_rl(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_verllvb(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rl(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_verllvb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_rl(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_verllvh(
+    (__vector unsigned short)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rl(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_verllvh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_rl(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_verllvf(
+    (__vector unsigned int)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rl(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_verllvf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_rl(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_verllvg(
+    (__vector unsigned long long)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rl(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_verllvg(__a, __b);
+}
+
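+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_rl rotates each element of __a left by the bit count held in the
+ * corresponding element of __n: */
+static inline __vector unsigned int
+__editorial_rl_demo(__vector unsigned int __a, __vector unsigned int __n) {
+  return vec_rl(__a, __n);  /* per-element rotate left by __n[i] bits */
+}
+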
+/*-- vec_rli ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_rli(__vector signed char __a, unsigned long __b) {
+  return (__vector signed char)__builtin_s390_verllb(
+    (__vector unsigned char)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_rli(__vector unsigned char __a, unsigned long __b) {
+  return __builtin_s390_verllb(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_rli(__vector signed short __a, unsigned long __b) {
+  return (__vector signed short)__builtin_s390_verllh(
+    (__vector unsigned short)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_rli(__vector unsigned short __a, unsigned long __b) {
+  return __builtin_s390_verllh(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_rli(__vector signed int __a, unsigned long __b) {
+  return (__vector signed int)__builtin_s390_verllf(
+    (__vector unsigned int)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_rli(__vector unsigned int __a, unsigned long __b) {
+  return __builtin_s390_verllf(__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_rli(__vector signed long long __a, unsigned long __b) {
+  return (__vector signed long long)__builtin_s390_verllg(
+    (__vector unsigned long long)__a, (int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_rli(__vector unsigned long long __a, unsigned long __b) {
+  return __builtin_s390_verllg(__a, (int)__b);
+}
+
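+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_rli rotates every element left by the same scalar count: */
+static inline __vector unsigned int
+__editorial_rli_demo(__vector unsigned int __a) {
+  return vec_rli(__a, 8);  /* rotate each 32-bit element left by 8 bits */
+}
+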
+/*-- vec_rl_mask ------------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_rl_mask(__vector signed char __a, __vector unsigned char __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned char
+vec_rl_mask(__vector unsigned char __a, __vector unsigned char __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector signed short
+vec_rl_mask(__vector signed short __a, __vector unsigned short __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned short
+vec_rl_mask(__vector unsigned short __a, __vector unsigned short __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector signed int
+vec_rl_mask(__vector signed int __a, __vector unsigned int __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned int
+vec_rl_mask(__vector unsigned int __a, __vector unsigned int __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector signed long long
+vec_rl_mask(__vector signed long long __a, __vector unsigned long long __b,
+            unsigned char __c) __constant(__c);
+
+extern __ATTRS_o __vector unsigned long long
+vec_rl_mask(__vector unsigned long long __a, __vector unsigned long long __b,
+            unsigned char __c) __constant(__c);
+
+#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
+  __extension__ ({ \
+    __vector unsigned char __res; \
+    __vector unsigned char __x = (__vector unsigned char)(X); \
+    __vector unsigned char __y = (__vector unsigned char)(Y); \
+    switch (sizeof ((X)[0])) { \
+    case 1: __res = (__vector unsigned char) __builtin_s390_verimb( \
+             (__vector unsigned char)__x, (__vector unsigned char)__x, \
+             (__vector unsigned char)__y, (Z)); break; \
+    case 2: __res = (__vector unsigned char) __builtin_s390_verimh( \
+             (__vector unsigned short)__x, (__vector unsigned short)__x, \
+             (__vector unsigned short)__y, (Z)); break; \
+    case 4: __res = (__vector unsigned char) __builtin_s390_verimf( \
+             (__vector unsigned int)__x, (__vector unsigned int)__x, \
+             (__vector unsigned int)__y, (Z)); break; \
+    default: __res = (__vector unsigned char) __builtin_s390_verimg( \
+             (__vector unsigned long long)__x, (__vector unsigned long long)__x, \
+             (__vector unsigned long long)__y, (Z)); break; \
+    } __res; }))
+
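+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * Judging from the rotate-and-insert-under-mask builtins the macro above
+ * expands to, vec_rl_mask(a, b, c) rotates each element of a left by the
+ * compile-time constant count c and keeps the rotated bits only where the
+ * mask b has one-bits, preserving the original bits of a elsewhere: */
+static inline __vector unsigned int
+__editorial_rl_mask_demo(__vector unsigned int __a, __vector unsigned int __mask) {
+  return vec_rl_mask(__a, __mask, 4);  /* count must be a constant */
+}
+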
+/*-- vec_sll ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned short __b) {
+  return (__vector signed char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sll(__vector signed char __a, __vector unsigned int __b) {
+  return (__vector signed char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned short __b) {
+  return (__vector __bool char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sll(__vector __bool char __a, __vector unsigned int __b) {
+  return (__vector __bool char)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsl(__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned short __b) {
+  return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sll(__vector unsigned char __a, __vector unsigned int __b) {
+  return __builtin_s390_vsl(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned char __b) {
+  return (__vector signed short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sll(__vector signed short __a, __vector unsigned int __b) {
+  return (__vector signed short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned char __b) {
+  return (__vector __bool short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sll(__vector __bool short __a, __vector unsigned int __b) {
+  return (__vector __bool short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned char __b) {
+  return (__vector unsigned short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sll(__vector unsigned short __a, __vector unsigned int __b) {
+  return (__vector unsigned short)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned char __b) {
+  return (__vector signed int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned short __b) {
+  return (__vector signed int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sll(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned char __b) {
+  return (__vector __bool int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned short __b) {
+  return (__vector __bool int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sll(__vector __bool int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned char __b) {
+  return (__vector unsigned int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned short __b) {
+  return (__vector unsigned int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sll(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned char __b) {
+  return (__vector signed long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned short __b) {
+  return (__vector signed long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sll(__vector signed long long __a, __vector unsigned int __b) {
+  return (__vector signed long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned char __b) {
+  return (__vector __bool long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned short __b) {
+  return (__vector __bool long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sll(__vector __bool long long __a, __vector unsigned int __b) {
+  return (__vector __bool long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned char __b) {
+  return (__vector unsigned long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned short __b) {
+  return (__vector unsigned long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sll(__vector unsigned long long __a, __vector unsigned int __b) {
+  return (__vector unsigned long long)__builtin_s390_vsl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
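+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_sll shifts the full 128-bit contents of __a left as one unit by a
+ * small bit count (0-7) taken from __b; larger shifts combine it with the
+ * byte-granular vec_slb below.  The non-deprecated form takes an unsigned
+ * char count vector: */
+static inline __vector unsigned int
+__editorial_sll_demo(__vector unsigned int __a, __vector unsigned char __n) {
+  return vec_sll(__a, __n);
+}
+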
+/*-- vec_slb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_slb(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vslb(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector signed char __b) {
+  return __builtin_s390_vslb(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_slb(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vslb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_slb(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector signed short __b) {
+  return (__vector unsigned short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_slb(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_slb(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector signed int __b) {
+  return (__vector unsigned int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_slb(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_slb(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector signed long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_slb(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector signed int __b) {
+  return (__vector float)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_slb(__vector float __a, __vector unsigned int __b) {
+  return (__vector float)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector signed long long __b) {
+  return (__vector double)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_slb(__vector double __a, __vector unsigned long long __b) {
+  return (__vector double)__builtin_s390_vslb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
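+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_slb shifts the full 128-bit contents of __a left by a whole number
+ * of bytes taken from __b, complementing the bit-level vec_sll above: */
+static inline __vector unsigned char
+__editorial_slb_demo(__vector unsigned char __a, __vector unsigned char __n) {
+  return vec_slb(__a, __n);
+}
+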
+/*-- vec_sld ----------------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_sld(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool char
+vec_sld(__vector __bool char __a, __vector __bool char __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned char
+vec_sld(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector signed short
+vec_sld(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool short
+vec_sld(__vector __bool short __a, __vector __bool short __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned short
+vec_sld(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector signed int
+vec_sld(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool int
+vec_sld(__vector __bool int __a, __vector __bool int __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned int
+vec_sld(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector signed long long
+vec_sld(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector __bool long long
+vec_sld(__vector __bool long long __a, __vector __bool long long __b, int __c)
+  __constant_range(__c, 0, 15);
+
+extern __ATTRS_o __vector unsigned long long
+vec_sld(__vector unsigned long long __a, __vector unsigned long long __b,
+        int __c)
+  __constant_range(__c, 0, 15);
+
+#if __ARCH__ >= 12
+extern __ATTRS_o __vector float
+vec_sld(__vector float __a, __vector float __b, int __c)
+  __constant_range(__c, 0, 15);
+#endif
+
+extern __ATTRS_o __vector double
+vec_sld(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 15);
+
+#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
+  __builtin_s390_vsldb((__vector unsigned char)(X), \
+                       (__vector unsigned char)(Y), (Z)))
+
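+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_sld(a, b, c) concatenates a and b and returns the 16 bytes starting
+ * at byte offset c (0..15), per the vsldb mapping above: */
+static inline __vector unsigned char
+__editorial_sld_demo(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_sld(__a, __b, 4);  /* bytes 4..19 of the concatenation */
+}
+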
+/*-- vec_sldw ---------------------------------------------------------------*/
+
+extern __ATTRS_o __vector signed char
+vec_sldw(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned char
+vec_sldw(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector signed short
+vec_sldw(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned short
+vec_sldw(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector signed int
+vec_sldw(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned int
+vec_sldw(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector signed long long
+vec_sldw(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 3);
+
+extern __ATTRS_o __vector unsigned long long
+vec_sldw(__vector unsigned long long __a, __vector unsigned long long __b,
+         int __c)
+  __constant_range(__c, 0, 3);
+
+// This prototype is deprecated.
+extern __ATTRS_o __vector double
+vec_sldw(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 3);
+
+#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
+  __builtin_s390_vsldb((__vector unsigned char)(X), \
+                       (__vector unsigned char)(Y), (Z) * 4))
+
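+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_sldw is the word-granular variant: the macro above multiplies the
+ * constant by 4, so c selects a 4-byte-aligned offset (0..3 words): */
+static inline __vector unsigned int
+__editorial_sldw_demo(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_sldw(__a, __b, 1);  /* same as vec_sld(__a, __b, 4) */
+}
+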
+/*-- vec_sldb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o __vector signed char
+vec_sldb(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned char
+vec_sldb(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed short
+vec_sldb(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned short
+vec_sldb(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed int
+vec_sldb(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned int
+vec_sldb(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed long long
+vec_sldb(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned long long
+vec_sldb(__vector unsigned long long __a, __vector unsigned long long __b,
+         int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector float
+vec_sldb(__vector float __a, __vector float __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector double
+vec_sldb(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 7);
+
+#define vec_sldb(X, Y, Z) ((__typeof__((vec_sldb)((X), (Y), (Z)))) \
+  __builtin_s390_vsld((__vector unsigned char)(X), \
+                      (__vector unsigned char)(Y), (Z)))
+
+#endif
+
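+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * On __ARCH__ >= 13 targets (e.g. -march=z15), vec_sldb shifts the
+ * concatenation of a and b left by a constant bit count (0..7) and
+ * returns the leftmost 128 bits: */
+#if __ARCH__ >= 13
+static inline __vector unsigned char
+__editorial_sldb_demo(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_sldb(__a, __b, 3);
+}
+#endif
+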
+/*-- vec_sral ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned short __b) {
+  return (__vector signed char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_sral(__vector signed char __a, __vector unsigned int __b) {
+  return (__vector signed char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned short __b) {
+  return (__vector __bool char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_sral(__vector __bool char __a, __vector unsigned int __b) {
+  return (__vector __bool char)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsra(__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned short __b) {
+  return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sral(__vector unsigned char __a, __vector unsigned int __b) {
+  return __builtin_s390_vsra(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned char __b) {
+  return (__vector signed short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_sral(__vector signed short __a, __vector unsigned int __b) {
+  return (__vector signed short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned char __b) {
+  return (__vector __bool short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_sral(__vector __bool short __a, __vector unsigned int __b) {
+  return (__vector __bool short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned char __b) {
+  return (__vector unsigned short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_sral(__vector unsigned short __a, __vector unsigned int __b) {
+  return (__vector unsigned short)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned char __b) {
+  return (__vector signed int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned short __b) {
+  return (__vector signed int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_sral(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned char __b) {
+  return (__vector __bool int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned short __b) {
+  return (__vector __bool int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_sral(__vector __bool int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned char __b) {
+  return (__vector unsigned int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned short __b) {
+  return (__vector unsigned int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sral(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned char __b) {
+  return (__vector signed long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned short __b) {
+  return (__vector signed long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_sral(__vector signed long long __a, __vector unsigned int __b) {
+  return (__vector signed long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned char __b) {
+  return (__vector __bool long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned short __b) {
+  return (__vector __bool long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_sral(__vector __bool long long __a, __vector unsigned int __b) {
+  return (__vector __bool long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned char __b) {
+  return (__vector unsigned long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned short __b) {
+  return (__vector unsigned long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sral(__vector unsigned long long __a, __vector unsigned int __b) {
+  return (__vector unsigned long long)__builtin_s390_vsra(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
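+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_sral shifts the full 128-bit contents of __a right arithmetically
+ * (sign-propagating) by a bit count taken from __b; the non-deprecated
+ * form takes an unsigned char count vector: */
+static inline __vector signed int
+__editorial_sral_demo(__vector signed int __a, __vector unsigned char __n) {
+  return vec_sral(__a, __n);
+}
+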
+/*-- vec_srab ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srab(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector signed char __b) {
+  return __builtin_s390_vsrab(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srab(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsrab(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srab(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector signed short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srab(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srab(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector signed int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srab(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srab(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector signed long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srab(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector signed int __b) {
+  return (__vector float)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_srab(__vector float __a, __vector unsigned int __b) {
+  return (__vector float)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector signed long long __b) {
+  return (__vector double)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_srab(__vector double __a, __vector unsigned long long __b) {
+  return (__vector double)__builtin_s390_vsrab(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
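+/* [Editorial sketch, not in the upstream header; helper name hypothetical.]
+ * vec_srab is the byte-granular arithmetic right shift companion to the
+ * bit-level vec_sral above: */
+static inline __vector signed int
+__editorial_srab_demo(__vector signed int __a, __vector signed int __n) {
+  return vec_srab(__a, __n);
+}
+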
+/*-- vec_srl ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned short __b) {
+  return (__vector signed char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_srl(__vector signed char __a, __vector unsigned int __b) {
+  return (__vector signed char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned short __b) {
+  return (__vector __bool char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool char
+vec_srl(__vector __bool char __a, __vector unsigned int __b) {
+  return (__vector __bool char)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsrl(__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned short __b) {
+  return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srl(__vector unsigned char __a, __vector unsigned int __b) {
+  return __builtin_s390_vsrl(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned char __b) {
+  return (__vector signed short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_srl(__vector signed short __a, __vector unsigned int __b) {
+  return (__vector signed short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned char __b) {
+  return (__vector __bool short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool short
+vec_srl(__vector __bool short __a, __vector unsigned int __b) {
+  return (__vector __bool short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned char __b) {
+  return (__vector unsigned short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srl(__vector unsigned short __a, __vector unsigned int __b) {
+  return (__vector unsigned short)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned char __b) {
+  return (__vector signed int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned short __b) {
+  return (__vector signed int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_srl(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned char __b) {
+  return (__vector __bool int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned short __b) {
+  return (__vector __bool int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool int
+vec_srl(__vector __bool int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned char __b) {
+  return (__vector unsigned int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned short __b) {
+  return (__vector unsigned int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srl(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned char __b) {
+  return (__vector signed long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned short __b) {
+  return (__vector signed long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_srl(__vector signed long long __a, __vector unsigned int __b) {
+  return (__vector signed long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned char __b) {
+  return (__vector __bool long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned short __b) {
+  return (__vector __bool long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector __bool long long
+vec_srl(__vector __bool long long __a, __vector unsigned int __b) {
+  return (__vector __bool long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned char __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, __b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned short __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srl(__vector unsigned long long __a, __vector unsigned int __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrl(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
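+// All vec_srl overloads reduce to the same byte-level builtin; the
+// deprecated forms differ only in the element type accepted for the shift
+// operand. The entire 128-bit register is shifted right logically by the
+// residual bit count (0-7) that the VSRL instruction takes from the
+// low-order bits of __b.
+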
+/*-- vec_srb ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_srb(__vector signed char __a, __vector unsigned char __b) {
+  return (__vector signed char)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector signed char __b) {
+  return __builtin_s390_vsrlb(__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_srb(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsrlb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_srb(__vector signed short __a, __vector unsigned short __b) {
+  return (__vector signed short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector signed short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_srb(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector unsigned short)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_srb(__vector signed int __a, __vector unsigned int __b) {
+  return (__vector signed int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector signed int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_srb(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector unsigned int)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector signed long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_srb(__vector signed long long __a, __vector unsigned long long __b) {
+  return (__vector signed long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector signed long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_srb(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return (__vector unsigned long long)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector signed int __b) {
+  return (__vector float)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_srb(__vector float __a, __vector unsigned int __b) {
+  return (__vector float)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector signed long long __b) {
+  return (__vector double)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_srb(__vector double __a, __vector unsigned long long __b) {
+  return (__vector double)__builtin_s390_vsrlb(
+    (__vector unsigned char)__a, (__vector unsigned char)__b);
+}
+
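+// vec_srb is the byte-granular counterpart of vec_srl (VSRLB rather than
+// VSRL): it shifts the whole register right by complete bytes. A common
+// idiom for an arbitrary 0-127 bit logical shift is to replicate the total
+// bit count into every byte of the shift operand and apply both forms, as a
+// sketch (names illustrative):
+//   __vector unsigned char __r = vec_srb(vec_srl(__x, __amt), __amt);
+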
+/*-- vec_srdb ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+extern __ATTRS_o __vector signed char
+vec_srdb(__vector signed char __a, __vector signed char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned char
+vec_srdb(__vector unsigned char __a, __vector unsigned char __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed short
+vec_srdb(__vector signed short __a, __vector signed short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned short
+vec_srdb(__vector unsigned short __a, __vector unsigned short __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed int
+vec_srdb(__vector signed int __a, __vector signed int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned int
+vec_srdb(__vector unsigned int __a, __vector unsigned int __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector signed long long
+vec_srdb(__vector signed long long __a, __vector signed long long __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector unsigned long long
+vec_srdb(__vector unsigned long long __a, __vector unsigned long long __b,
+         int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector float
+vec_srdb(__vector float __a, __vector float __b, int __c)
+  __constant_range(__c, 0, 7);
+
+extern __ATTRS_o __vector double
+vec_srdb(__vector double __a, __vector double __b, int __c)
+  __constant_range(__c, 0, 7);
+
+#define vec_srdb(X, Y, Z) ((__typeof__((vec_srdb)((X), (Y), (Z)))) \
+  __builtin_s390_vsrd((__vector unsigned char)(X), \
+                      (__vector unsigned char)(Y), (Z)))
+
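+// Note the pattern used here: the extern prototypes exist for overload
+// resolution and for the __constant_range diagnostic on __c, while the
+// macro supplies the actual expansion, so __c reaches the underlying
+// builtin as a compile-time constant. The parenthesized (vec_srdb) call
+// inside __typeof__ resolves against the declared prototypes rather than
+// the macro, recovering the properly typed result.
+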
+#endif
+
+/*-- vec_abs ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_abs(__vector signed char __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed char)0));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_abs(__vector signed short __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed short)0));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_abs(__vector signed int __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed int)0));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_abs(__vector signed long long __a) {
+  return vec_sel(__a, -__a, vec_cmplt(__a, (__vector signed long long)0));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_abs(__vector float __a) {
+  return __builtin_s390_vflpsb(__a);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_abs(__vector double __a) {
+  return __builtin_s390_vflpdb(__a);
+}
+
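+// The integer overloads compute |__a| by selecting between __a and -__a, so
+// the usual two's-complement caveat applies: the most negative element
+// value is its own negation and comes back unchanged. Sketch (values
+// illustrative):
+//   __vector signed int __v = {-1, 2, -3, 4};
+//   __vector signed int __r = vec_abs(__v);  // {1, 2, 3, 4}
+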
+/*-- vec_nabs ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nabs(__vector float __a) {
+  return __builtin_s390_vflnsb(__a);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_nabs(__vector double __a) {
+  return __builtin_s390_vflndb(__a);
+}
+
+/*-- vec_max ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector signed char __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector signed char __a, __vector __bool char __b) {
+  __vector signed char __bc = (__vector signed char)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_max(__vector __bool char __a, __vector signed char __b) {
+  __vector signed char __ac = (__vector signed char)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector unsigned char __a, __vector __bool char __b) {
+  __vector unsigned char __bc = (__vector unsigned char)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_max(__vector __bool char __a, __vector unsigned char __b) {
+  __vector unsigned char __ac = (__vector unsigned char)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector signed short __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector signed short __a, __vector __bool short __b) {
+  __vector signed short __bc = (__vector signed short)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_max(__vector __bool short __a, __vector signed short __b) {
+  __vector signed short __ac = (__vector signed short)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector unsigned short __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector unsigned short __a, __vector __bool short __b) {
+  __vector unsigned short __bc = (__vector unsigned short)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_max(__vector __bool short __a, __vector unsigned short __b) {
+  __vector unsigned short __ac = (__vector unsigned short)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector signed int __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector signed int __a, __vector __bool int __b) {
+  __vector signed int __bc = (__vector signed int)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_max(__vector __bool int __a, __vector signed int __b) {
+  __vector signed int __ac = (__vector signed int)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector unsigned int __a, __vector __bool int __b) {
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_max(__vector __bool int __a, __vector unsigned int __b) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector signed long long __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector signed long long __a, __vector __bool long long __b) {
+  __vector signed long long __bc = (__vector signed long long)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_max(__vector __bool long long __a, __vector signed long long __b) {
+  __vector signed long long __ac = (__vector signed long long)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector unsigned long long __a, __vector __bool long long __b) {
+  __vector unsigned long long __bc = (__vector unsigned long long)__b;
+  return vec_sel(__bc, __a, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_max(__vector __bool long long __a, __vector unsigned long long __b) {
+  __vector unsigned long long __ac = (__vector unsigned long long)__a;
+  return vec_sel(__b, __ac, vec_cmpgt(__ac, __b));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_max(__vector float __a, __vector float __b) {
+  return __builtin_s390_vfmaxsb(__a, __b, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_max(__vector double __a, __vector double __b) {
+#if __ARCH__ >= 12
+  return __builtin_s390_vfmaxdb(__a, __b, 0);
+#else
+  return vec_sel(__b, __a, vec_cmpgt(__a, __b));
+#endif
+}
+
+/*-- vec_min ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector signed char __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector signed char __a, __vector __bool char __b) {
+  __vector signed char __bc = (__vector signed char)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed char
+vec_min(__vector __bool char __a, __vector signed char __b) {
+  __vector signed char __ac = (__vector signed char)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector unsigned char __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector unsigned char __a, __vector __bool char __b) {
+  __vector unsigned char __bc = (__vector unsigned char)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned char
+vec_min(__vector __bool char __a, __vector unsigned char __b) {
+  __vector unsigned char __ac = (__vector unsigned char)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector signed short __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector signed short __a, __vector __bool short __b) {
+  __vector signed short __bc = (__vector signed short)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed short
+vec_min(__vector __bool short __a, __vector signed short __b) {
+  __vector signed short __ac = (__vector signed short)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector unsigned short __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector unsigned short __a, __vector __bool short __b) {
+  __vector unsigned short __bc = (__vector unsigned short)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned short
+vec_min(__vector __bool short __a, __vector unsigned short __b) {
+  __vector unsigned short __ac = (__vector unsigned short)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector signed int __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector signed int __a, __vector __bool int __b) {
+  __vector signed int __bc = (__vector signed int)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed int
+vec_min(__vector __bool int __a, __vector signed int __b) {
+  __vector signed int __ac = (__vector signed int)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector unsigned int __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector unsigned int __a, __vector __bool int __b) {
+  __vector unsigned int __bc = (__vector unsigned int)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned int
+vec_min(__vector __bool int __a, __vector unsigned int __b) {
+  __vector unsigned int __ac = (__vector unsigned int)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector signed long long __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector signed long long __a, __vector __bool long long __b) {
+  __vector signed long long __bc = (__vector signed long long)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_min(__vector __bool long long __a, __vector signed long long __b) {
+  __vector signed long long __ac = (__vector signed long long)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector unsigned long long __a, __vector __bool long long __b) {
+  __vector unsigned long long __bc = (__vector unsigned long long)__b;
+  return vec_sel(__a, __bc, vec_cmpgt(__a, __bc));
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_min(__vector __bool long long __a, __vector unsigned long long __b) {
+  __vector unsigned long long __ac = (__vector unsigned long long)__a;
+  return vec_sel(__ac, __b, vec_cmpgt(__ac, __b));
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_min(__vector float __a, __vector float __b) {
+  return __builtin_s390_vfminsb(__a, __b, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_min(__vector double __a, __vector double __b) {
+#if __ARCH__ >= 12
+  return __builtin_s390_vfmindb(__a, __b, 0);
+#else
+  return vec_sel(__a, __b, vec_cmpgt(__a, __b));
+#endif
+}
+
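+// The integer vec_max/vec_min overloads are compare-and-select, e.g.
+// (values illustrative):
+//   __vector signed int __a = {1, -5, 3, 0};
+//   __vector signed int __b = {0, 2, 7, 0};
+//   vec_max(__a, __b);  // {1, 2, 7, 0}
+//   vec_min(__a, __b);  // {0, -5, 3, 0}
+// In the pre-z14 double fallbacks the select keeps the first vec_sel
+// argument whenever the comparison is false (__b for vec_max, __a for
+// vec_min), which is what a NaN operand produces.
+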
+/*-- vec_add_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vaq(__a, __b);
+}
+
+/*-- vec_addc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_addc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vaccb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_addc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vacch(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_addc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vaccf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vaccg(__a, __b);
+}
+
+/*-- vec_addc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vaccq(__a, __b);
+}
+
+/*-- vec_adde_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c) {
+  return __builtin_s390_vacq(__a, __b, __c);
+}
+
+/*-- vec_addec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b,
+               __vector unsigned char __c) {
+  return __builtin_s390_vacccq(__a, __b, __c);
+}
+
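+// These treat byte vectors as single 128-bit integers and, together with
+// the carry forms, support multi-precision arithmetic. A 256-bit addition
+// sketch (operand names illustrative):
+//   __vector unsigned char __lo = vec_add_u128(__alo, __blo);
+//   __vector unsigned char __cy = vec_addc_u128(__alo, __blo); // 0 or 1
+//   __vector unsigned char __hi = vec_adde_u128(__ahi, __bhi, __cy);
+// vec_addec_u128 returns the carry out of such a carry-in addition, for
+// chains wider than 256 bits.
+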
+/*-- vec_avg ----------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_avg(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vavgb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_avg(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vavgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_avg(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vavgf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_avg(__vector signed long long __a, __vector signed long long __b) {
+  return __builtin_s390_vavgg(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_avg(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vavglb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_avg(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vavglh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_avg(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vavglf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_avg(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vavglg(__a, __b);
+}
+
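+// vec_avg computes the rounded average (__a + __b + 1) >> 1 of each pair of
+// elements, with enough internal precision that the intermediate sum cannot
+// overflow; e.g. averaging 1 and 2 yields 2.
+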
+/*-- vec_checksum -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned int
+vec_checksum(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vcksm(__a, __b);
+}
+
+/*-- vec_gfmsum -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vgfmb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vgfmh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vgfmf(__a, __b);
+}
+
+/*-- vec_gfmsum_128 ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_128(__vector unsigned long long __a,
+               __vector unsigned long long __b) {
+  return __builtin_s390_vgfmg(__a, __b);
+}
+
+/*-- vec_gfmsum_accum -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_gfmsum_accum(__vector unsigned char __a, __vector unsigned char __b,
+                 __vector unsigned short __c) {
+  return __builtin_s390_vgfmab(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_gfmsum_accum(__vector unsigned short __a, __vector unsigned short __b,
+                 __vector unsigned int __c) {
+  return __builtin_s390_vgfmah(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_gfmsum_accum(__vector unsigned int __a, __vector unsigned int __b,
+                 __vector unsigned long long __c) {
+  return __builtin_s390_vgfmaf(__a, __b, __c);
+}
+
+/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_gfmsum_accum_128(__vector unsigned long long __a,
+                     __vector unsigned long long __b,
+                     __vector unsigned char __c) {
+  return __builtin_s390_vgfmag(__a, __b, __c);
+}
+
+/*-- vec_mladd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector signed char __b,
+          __vector signed char __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector unsigned char __a, __vector signed char __b,
+          __vector signed char __c) {
+  return (__vector signed char)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mladd(__vector signed char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return __a * (__vector signed char)__b + (__vector signed char)__c;
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mladd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector signed short __b,
+          __vector signed short __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector unsigned short __a, __vector signed short __b,
+          __vector signed short __c) {
+  return (__vector signed short)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mladd(__vector signed short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return __a * (__vector signed short)__b + (__vector signed short)__c;
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mladd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector signed int __b,
+          __vector signed int __c) {
+  return __a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector unsigned int __a, __vector signed int __b,
+          __vector signed int __c) {
+  return (__vector signed int)__a * __b + __c;
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mladd(__vector signed int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return __a * (__vector signed int)__b + (__vector signed int)__c;
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mladd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return __a * __b + __c;
+}
+
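+// vec_mladd is a plain low-half multiply-add, evaluated modulo the element
+// width; the mixed-sign overloads exist so that combining signed and
+// unsigned operands produces the signed result type. Sketch (names
+// illustrative):
+//   __vector signed short __r = vec_mladd(__a, __b, __c);  // __a*__b + __c
+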
+/*-- vec_mhadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mhadd(__vector signed char __a, __vector signed char __b,
+          __vector signed char __c) {
+  return __builtin_s390_vmahb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mhadd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return __builtin_s390_vmalhb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mhadd(__vector signed short __a, __vector signed short __b,
+          __vector signed short __c) {
+  return __builtin_s390_vmahh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mhadd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return __builtin_s390_vmalhh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mhadd(__vector signed int __a, __vector signed int __b,
+          __vector signed int __c) {
+  return __builtin_s390_vmahf(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mhadd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return __builtin_s390_vmalhf(__a, __b, __c);
+}
+
+/*-- vec_meadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_meadd(__vector signed char __a, __vector signed char __b,
+          __vector signed short __c) {
+  return __builtin_s390_vmaeb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_meadd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned short __c) {
+  return __builtin_s390_vmaleb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_meadd(__vector signed short __a, __vector signed short __b,
+          __vector signed int __c) {
+  return __builtin_s390_vmaeh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_meadd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned int __c) {
+  return __builtin_s390_vmaleh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_meadd(__vector signed int __a, __vector signed int __b,
+          __vector signed long long __c) {
+  return __builtin_s390_vmaef(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_meadd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned long long __c) {
+  return __builtin_s390_vmalef(__a, __b, __c);
+}
+
+/*-- vec_moadd --------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_moadd(__vector signed char __a, __vector signed char __b,
+          __vector signed short __c) {
+  return __builtin_s390_vmaob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_moadd(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned short __c) {
+  return __builtin_s390_vmalob(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_moadd(__vector signed short __a, __vector signed short __b,
+          __vector signed int __c) {
+  return __builtin_s390_vmaoh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_moadd(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned int __c) {
+  return __builtin_s390_vmaloh(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_moadd(__vector signed int __a, __vector signed int __b,
+          __vector signed long long __c) {
+  return __builtin_s390_vmaof(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_moadd(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned long long __c) {
+  return __builtin_s390_vmalof(__a, __b, __c);
+}
+
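+// vec_meadd and vec_moadd are the accumulating forms of vec_mule and
+// vec_mulo below: they multiply the even- or odd-indexed element pairs to
+// double width and add the double-width accumulator __c. vec_mhadd above
+// returns, roughly, the high-order half of __a * __b + __c (see the
+// VMAH/VMALH instruction definitions for the exact extension rules).
+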
+/*-- vec_mulh ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_mulh(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vmhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_mulh(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vmlhb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mulh(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vmhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulh(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vmlhh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mulh(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vmhf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulh(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vmlhf(__a, __b);
+}
+
+/*-- vec_mule ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mule(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vmeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mule(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vmleb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mule(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vmeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mule(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vmleh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mule(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vmef(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mule(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vmlef(__a, __b);
+}
+
+/*-- vec_mulo ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed short
+vec_mulo(__vector signed char __a, __vector signed char __b) {
+  return __builtin_s390_vmob(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_mulo(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vmlob(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_mulo(__vector signed short __a, __vector signed short __b) {
+  return __builtin_s390_vmoh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_mulo(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vmloh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_mulo(__vector signed int __a, __vector signed int __b) {
+  return __builtin_s390_vmof(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_mulo(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vmlof(__a, __b);
+}
+
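+// The widening multiplies pick alternating elements: vec_mule multiplies
+// the even-indexed pairs and vec_mulo the odd-indexed pairs, each to double
+// width, while vec_mulh above keeps only the high half of each same-width
+// product. For example (values illustrative):
+//   __vector signed short __a = {1, 2, 3, 4, 5, 6, 7, 8};
+//   vec_mule(__a, __a);  // {1, 9, 25, 49}   elements 0, 2, 4, 6 squared
+//   vec_mulo(__a, __a);  // {4, 16, 36, 64}  elements 1, 3, 5, 7 squared
+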
+/*-- vec_msum_u128 ----------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+#define vec_msum_u128(X, Y, Z, W) \
+  ((__vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)))
+#endif
+
+/*-- vec_sub_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsq(__a, __b);
+}
+
+/*-- vec_subc ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_subc(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vscbib(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_subc(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vscbih(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_subc(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vscbif(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vscbig(__a, __b);
+}
+
+/*-- vec_subc_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vscbiq(__a, __b);
+}
+
+/*-- vec_sube_u128 ----------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c) {
+  return __builtin_s390_vsbiq(__a, __b, __c);
+}
+
+/*-- vec_subec_u128 ---------------------------------------------------------*/
+
+static inline __ATTRS_ai __vector unsigned char
+vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b,
+               __vector unsigned char __c) {
+  return __builtin_s390_vsbcbiq(__a, __b, __c);
+}
+
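+// The vec_sub* forms mirror the vec_add* family above; the vec_subc*
+// results are the borrow indications of the VSCBI instructions, i.e. the
+// carry out of the two's-complement subtraction (1 when no borrow is
+// needed), and vec_sube_u128/vec_subec_u128 consume and produce that
+// indication for multi-precision chains.
+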
+/*-- vec_sum2 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vsumgh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_sum2(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vsumgf(__a, __b);
+}
+
+/*-- vec_sum_u128 -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vsumqf(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vsumqg(__a, __b);
+}
+
+/*-- vec_sum4 ---------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vsumb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_sum4(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vsumh(__a, __b);
+}
+
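+// These are horizontal sums: each result element accumulates a group of
+// adjacent elements of __a plus one element of __b from the same group (the
+// VSUM, VSUMG and VSUMQ definitions specify which element of __b
+// participates). vec_sum4 forms word sums, vec_sum2 doubleword sums, and
+// vec_sum_u128 a single 128-bit sum.
+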
+/*-- vec_test_mask ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed char __a, __vector unsigned char __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vtm(__a, __b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed short __a, __vector unsigned short __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed int __a, __vector unsigned int __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector signed long long __a, __vector unsigned long long __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector unsigned long long __a,
+              __vector unsigned long long __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector float __a, __vector unsigned int __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+#endif
+
+static inline __ATTRS_o_ai int
+vec_test_mask(__vector double __a, __vector unsigned long long __b) {
+  return __builtin_s390_vtm((__vector unsigned char)__a,
+                            (__vector unsigned char)__b);
+}
+
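+// vec_test_mask returns the condition code of VTM: 0 when every bit of __a
+// selected by the mask __b is zero (or the mask is all zeros), 1 when the
+// selected bits are mixed, and 3 when they are all ones. Sketch (mask name
+// illustrative):
+//   if (vec_test_mask(__v, __sign_bits) == 0)
+//     ...;  // none of the selected bits is set
+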
+/*-- vec_madd ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_madd(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfmasb(__a, __b, __c);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_madd(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfmadb(__a, __b, __c);
+}
+
+/*-- vec_msub ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_msub(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfmssb(__a, __b, __c);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_msub(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfmsdb(__a, __b, __c);
+}
+
+/*-- vec_nmadd --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nmadd(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfnmasb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_nmadd(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfnmadb(__a, __b, __c);
+}
+#endif
+
+/*-- vec_nmsub --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_nmsub(__vector float __a, __vector float __b, __vector float __c) {
+  return __builtin_s390_vfnmssb(__a, __b, __c);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_nmsub(__vector double __a, __vector double __b, __vector double __c) {
+  return __builtin_s390_vfnmsdb(__a, __b, __c);
+}
+#endif
+
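+// vec_madd, vec_msub, vec_nmadd and vec_nmsub above are all fused
+// (single-rounding) operations: __a * __b + __c, __a * __b - __c, and the
+// negations -(__a * __b + __c) and -(__a * __b - __c) respectively.
+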
+/*-- vec_sqrt ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_sqrt(__vector float __a) {
+  return __builtin_s390_vfsqsb(__a);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_sqrt(__vector double __a) {
+  return __builtin_s390_vfsqdb(__a);
+}
+
+/*-- vec_ld2f ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_ai __vector double
+vec_ld2f(const float *__ptr) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  return __builtin_convertvector(*(const __v2f32 *)__ptr, __vector double);
+}
+
+/*-- vec_st2f ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_ai void
+vec_st2f(__vector double __a, float *__ptr) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32);
+}
+
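+// Round-trip sketch for the two deprecated helpers above (buffer name
+// illustrative):
+//   float __buf[2] = {1.0f, 2.0f};
+//   __vector double __d = vec_ld2f(__buf);  // {1.0, 2.0}
+//   vec_st2f(__d, __buf);                   // narrows back to two floats
+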
+/*-- vec_ctd ----------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector signed long long __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __vector double __conv = __builtin_convertvector(__a, __vector double);
+  __conv *= ((__vector double)(__vector unsigned long long)
+             ((0x3ffULL - __b) << 52));
+  return __conv;
+}
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector double
+vec_ctd(__vector unsigned long long __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __vector double __conv = __builtin_convertvector(__a, __vector double);
+  __conv *= ((__vector double)(__vector unsigned long long)
+             ((0x3ffULL - __b) << 52));
+  return __conv;
+}
+
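+// The scale factor above is built directly as an IEEE-754 bit pattern:
+// (0x3ffULL - __b) << 52 places the biased exponent 1023 - __b next to a
+// zero fraction, i.e. the double value 2.0^-__b, so vec_ctd converts to
+// double and then divides by 2^__b. vec_ctsl and vec_ctul below scale by
+// 2.0^__b (exponent 1023 + __b) before converting back to integers.
+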
+/*-- vec_ctsl ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector signed long long
+vec_ctsl(__vector double __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __a *= ((__vector double)(__vector unsigned long long)
+          ((0x3ffULL + __b) << 52));
+  return __builtin_convertvector(__a, __vector signed long long);
+}
+
+/*-- vec_ctul ---------------------------------------------------------------*/
+
+// This prototype is deprecated.
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_ctul(__vector double __a, int __b)
+  __constant_range(__b, 0, 31) {
+  __a *= ((__vector double)(__vector unsigned long long)
+          ((0x3ffULL + __b) << 52));
+  return __builtin_convertvector(__a, __vector unsigned long long);
+}
+
+/*-- vec_doublee ------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector double
+vec_doublee(__vector float __a) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  __v2f32 __pack = __builtin_shufflevector(__a, __a, 0, 2);
+  return __builtin_convertvector(__pack, __vector double);
+}
+#endif
+
+/*-- vec_floate -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_ai __vector float
+vec_floate(__vector double __a) {
+  typedef float __v2f32 __attribute__((__vector_size__(8)));
+  __v2f32 __pack = __builtin_convertvector(__a, __v2f32);
+  return __builtin_shufflevector(__pack, __pack, 0, -1, 1, -1);
+}
+#endif
+
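+// vec_doublee widens the even-indexed float elements to doubles;
+// vec_floate narrows doubles back into the even-indexed float lanes, and
+// the odd result lanes are unspecified (the -1 shuffle indices above).
+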
+/*-- vec_double -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector signed long long __a) {
+  return __builtin_convertvector(__a, __vector double);
+}
+
+static inline __ATTRS_o_ai __vector double
+vec_double(__vector unsigned long long __a) {
+  return __builtin_convertvector(__a, __vector double);
+}
+
+/*-- vec_float --------------------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector signed int __a) {
+  return __builtin_convertvector(__a, __vector float);
+}
+
+static inline __ATTRS_o_ai __vector float
+vec_float(__vector unsigned int __a) {
+  return __builtin_convertvector(__a, __vector float);
+}
+
+#endif
+
+/*-- vec_signed -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed long long
+vec_signed(__vector double __a) {
+  return __builtin_convertvector(__a, __vector signed long long);
+}
+
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai __vector signed int
+vec_signed(__vector float __a) {
+  return __builtin_convertvector(__a, __vector signed int);
+}
+#endif
+
+/*-- vec_unsigned -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned long long
+vec_unsigned(__vector double __a) {
+  return __builtin_convertvector(__a, __vector unsigned long long);
+}
+
+#if __ARCH__ >= 13
+static inline __ATTRS_o_ai __vector unsigned int
+vec_unsigned(__vector float __a) {
+  return __builtin_convertvector(__a, __vector unsigned int);
+}
+#endif
+
+/*-- vec_roundp -------------------------------------------------------------*/
+
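+/* For the vfisb/vfidb builtins in the rounding helpers below, the first
+   literal operand is the M4 field (4 suppresses the IEEE-inexact
+   exception) and the second selects the rounding method: 0 = current
+   FPC rounding mode, 4 = to nearest with ties to even, 5 = toward zero,
+   6 = toward +infinity, 7 = toward -infinity. */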
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundp(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 6);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundp(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_ceil ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_ceil(__vector float __a) {
+  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 4, 6);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_ceil(__vector double __a) {
+  // On this platform, vec_ceil never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 4, 6);
+}
+
+/*-- vec_roundm -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundm(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 7);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundm(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_floor --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_floor(__vector float __a) {
+  // On this platform, vec_floor never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 4, 7);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_floor(__vector double __a) {
+  // On this platform, vec_floor never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 4, 7);
+}
+
+/*-- vec_roundz -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundz(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 5);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundz(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_trunc --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_trunc(__vector float __a) {
+  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 4, 5);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_trunc(__vector double __a) {
+  // On this platform, vec_trunc never triggers the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 4, 5);
+}
+
+/*-- vec_roundc -------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_roundc(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_roundc(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 0);
+}
+
+/*-- vec_rint ---------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_rint(__vector float __a) {
+  // vec_rint may trigger the IEEE-inexact exception.
+  return __builtin_s390_vfisb(__a, 0, 0);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_rint(__vector double __a) {
+  // vec_rint may trigger the IEEE-inexact exception.
+  return __builtin_s390_vfidb(__a, 0, 0);
+}
+
+/*-- vec_round --------------------------------------------------------------*/
+
+#if __ARCH__ >= 12
+static inline __ATTRS_o_ai __vector float
+vec_round(__vector float __a) {
+  return __builtin_s390_vfisb(__a, 4, 4);
+}
+#endif
+
+static inline __ATTRS_o_ai __vector double
+vec_round(__vector double __a) {
+  return __builtin_s390_vfidb(__a, 4, 4);
+}
+
+/*-- vec_fp_test_data_class -------------------------------------------------*/
+
+#if __ARCH__ >= 12
+extern __ATTRS_o __vector __bool int
+vec_fp_test_data_class(__vector float __a, int __b, int *__c)
+  __constant_range(__b, 0, 4095);
+
+extern __ATTRS_o __vector __bool long long
+vec_fp_test_data_class(__vector double __a, int __b, int *__c)
+  __constant_range(__b, 0, 4095);
+
+#define vec_fp_test_data_class(X, Y, Z) \
+  ((__typeof__((vec_fp_test_data_class)((X), (Y), (Z)))) \
+   __extension__ ({ \
+     __vector unsigned char __res; \
+     __vector unsigned char __x = (__vector unsigned char)(X); \
+     int *__z = (Z); \
+     switch (sizeof ((X)[0])) { \
+     case 4:  __res = (__vector unsigned char) \
+                      __builtin_s390_vftcisb((__vector float)__x, (Y), __z); \
+              break; \
+     default: __res = (__vector unsigned char) \
+                      __builtin_s390_vftcidb((__vector double)__x, (Y), __z); \
+              break; \
+     } __res; }))
+#else
+#define vec_fp_test_data_class(X, Y, Z) \
+  ((__vector __bool long long)__builtin_s390_vftcidb((X), (Y), (Z)))
+#endif
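+/* The macro form lets one spelling accept both float and double vectors:
+   sizeof((X)[0]) selects vftcisb or vftcidb at compile time, while
+   __typeof__ applied to the (never-defined) overload declarations above
+   recovers the matching __bool result type.  Parenthesizing the name in
+   (vec_fp_test_data_class)(...) keeps the macro from recursing. */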
+
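+/* Data-class mask bits for vec_fp_test_data_class: each class comes in a
+   positive (_P) and negative (_N) variant, and the masks may be OR-ed
+   together to test several classes at once. */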
+#define __VEC_CLASS_FP_ZERO_P (1 << 11)
+#define __VEC_CLASS_FP_ZERO_N (1 << 10)
+#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | __VEC_CLASS_FP_ZERO_N)
+#define __VEC_CLASS_FP_NORMAL_P (1 << 9)
+#define __VEC_CLASS_FP_NORMAL_N (1 << 8)
+#define __VEC_CLASS_FP_NORMAL (__VEC_CLASS_FP_NORMAL_P | \
+                               __VEC_CLASS_FP_NORMAL_N)
+#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 7)
+#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 6)
+#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
+                                  __VEC_CLASS_FP_SUBNORMAL_N)
+#define __VEC_CLASS_FP_INFINITY_P (1 << 5)
+#define __VEC_CLASS_FP_INFINITY_N (1 << 4)
+#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \
+                                 __VEC_CLASS_FP_INFINITY_N)
+#define __VEC_CLASS_FP_QNAN_P (1 << 3)
+#define __VEC_CLASS_FP_QNAN_N (1 << 2)
+#define __VEC_CLASS_FP_QNAN (__VEC_CLASS_FP_QNAN_P | __VEC_CLASS_FP_QNAN_N)
+#define __VEC_CLASS_FP_SNAN_P (1 << 1)
+#define __VEC_CLASS_FP_SNAN_N (1 << 0)
+#define __VEC_CLASS_FP_SNAN (__VEC_CLASS_FP_SNAN_P | __VEC_CLASS_FP_SNAN_N)
+#define __VEC_CLASS_FP_NAN (__VEC_CLASS_FP_QNAN | __VEC_CLASS_FP_SNAN)
+#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \
+                                   __VEC_CLASS_FP_SUBNORMAL | \
+                                   __VEC_CLASS_FP_ZERO | \
+                                   __VEC_CLASS_FP_INFINITY)
+
+/*-- vec_cp_until_zero ------------------------------------------------------*/
+
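+/* "Copy until zero": the VECTOR ISOLATE STRING (vistr*) builtins copy
+   elements up to and including the first zero element and clear every
+   element after it. */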
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero(__vector signed char __a) {
+  return ((__vector signed char)
+          __builtin_s390_vistrb((__vector unsigned char)__a));
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero(__vector __bool char __a) {
+  return ((__vector __bool char)
+          __builtin_s390_vistrb((__vector unsigned char)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero(__vector unsigned char __a) {
+  return __builtin_s390_vistrb(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero(__vector signed short __a) {
+  return ((__vector signed short)
+          __builtin_s390_vistrh((__vector unsigned short)__a));
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero(__vector __bool short __a) {
+  return ((__vector __bool short)
+          __builtin_s390_vistrh((__vector unsigned short)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero(__vector unsigned short __a) {
+  return __builtin_s390_vistrh(__a);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero(__vector signed int __a) {
+  return ((__vector signed int)
+          __builtin_s390_vistrf((__vector unsigned int)__a));
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero(__vector __bool int __a) {
+  return ((__vector __bool int)
+          __builtin_s390_vistrf((__vector unsigned int)__a));
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero(__vector unsigned int __a) {
+  return __builtin_s390_vistrf(__a);
+}
+
+/*-- vec_cp_until_zero_cc ---------------------------------------------------*/
+
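+/* The _cc variants used from here on also store the instruction's
+   condition code (0-3) through the int *__cc out-parameter; for the
+   string-search instructions it reflects the outcome of the search
+   (e.g. whether a zero element was found). */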
+static inline __ATTRS_o_ai __vector signed char
+vec_cp_until_zero_cc(__vector signed char __a, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cp_until_zero_cc(__vector __bool char __a, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vistrbs((__vector unsigned char)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cp_until_zero_cc(__vector unsigned char __a, int *__cc) {
+  return __builtin_s390_vistrbs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cp_until_zero_cc(__vector signed short __a, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cp_until_zero_cc(__vector __bool short __a, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vistrhs((__vector unsigned short)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cp_until_zero_cc(__vector unsigned short __a, int *__cc) {
+  return __builtin_s390_vistrhs(__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cp_until_zero_cc(__vector signed int __a, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cp_until_zero_cc(__vector __bool int __a, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vistrfs((__vector unsigned int)__a, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cp_until_zero_cc(__vector unsigned int __a, int *__cc) {
+  return __builtin_s390_vistrfs(__a, __cc);
+}
+
+/*-- vec_cmpeq_idx ----------------------------------------------------------*/
+
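+/* The _idx family returns the byte index of the leftmost element for
+   which the comparison holds; per the VECTOR FIND ELEMENT EQUAL
+   definition, the index is held in byte element 7 of the result and is
+   16 when no element qualifies. */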
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfeeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfeeb((__vector unsigned char)__a,
+                              (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfeeb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfeeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfeeh((__vector unsigned short)__a,
+                              (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfeeh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfeef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfeef((__vector unsigned int)__a,
+                              (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfeef(__a, __b);
+}
+
+/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_idx_cc(__vector signed char __a, __vector signed char __b,
+                 int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfeebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector __bool char __a, __vector __bool char __b,
+                 int *__cc) {
+  return __builtin_s390_vfeebs((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                 int *__cc) {
+  return __builtin_s390_vfeebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_idx_cc(__vector signed short __a, __vector signed short __b,
+                 int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfeehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                 int *__cc) {
+  return __builtin_s390_vfeehs((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                 int *__cc) {
+  return __builtin_s390_vfeehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfeefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfeefs((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                 int *__cc) {
+  return __builtin_s390_vfeefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/
+
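+/* The "or_0" forms use the zero-search (vfeez*) builtins, which also
+   stop at the first zero element, so the search terminates at a string
+   terminator even when no equal pair occurs. */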
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfeezb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfeezb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfeezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfeezh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfeezh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfeezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfeezf((__vector unsigned int)__a,
+                          (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfeezf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfeezf(__a, __b);
+}
+
+/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpeq_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
+                      int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfeezbs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezbs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpeq_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpeq_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
+                      int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfeezhs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezhs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpeq_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpeq_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+                      int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfeezfs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezfs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpeq_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                      int *__cc) {
+  return __builtin_s390_vfeezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_idx ----------------------------------------------------------*/
+
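+/* Same scheme as vec_cmpeq_idx, but built on VECTOR FIND ELEMENT NOT
+   EQUAL (vfene*): it locates the first mismatching element pair. */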
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfeneb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfeneb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfeneb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfeneh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfeneh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfeneh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfenef((__vector unsigned int)__a,
+                          (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfenef((__vector unsigned int)__a,
+                               (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfenef(__a, __b);
+}
+
+/*-- vec_cmpne_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_idx_cc(__vector signed char __a, __vector signed char __b,
+                 int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfenebs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector __bool char __a, __vector __bool char __b,
+                 int *__cc) {
+  return __builtin_s390_vfenebs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                 int *__cc) {
+  return __builtin_s390_vfenebs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_idx_cc(__vector signed short __a, __vector signed short __b,
+                 int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfenehs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                 int *__cc) {
+  return __builtin_s390_vfenehs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                 int *__cc) {
+  return __builtin_s390_vfenehs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_idx_cc(__vector signed int __a, __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfenefs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector __bool int __a, __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfenefs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                 int *__cc) {
+  return __builtin_s390_vfenefs(__a, __b, __cc);
+}
+
+/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfenezb((__vector unsigned char)__a,
+                           (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfenezb((__vector unsigned char)__a,
+                                (__vector unsigned char)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfenezb(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfenezh((__vector unsigned short)__a,
+                           (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfenezh((__vector unsigned short)__a,
+                                (__vector unsigned short)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfenezh(__a, __b);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfenezf((__vector unsigned int)__a,
+                           (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfenezf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfenezf(__a, __b);
+}
+
+/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_cmpne_or_0_idx_cc(__vector signed char __a, __vector signed char __b,
+                      int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfenezbs((__vector unsigned char)__a,
+                            (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector __bool char __a, __vector __bool char __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezbs((__vector unsigned char)__a,
+                                 (__vector unsigned char)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpne_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezbs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_cmpne_or_0_idx_cc(__vector signed short __a, __vector signed short __b,
+                      int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfenezhs((__vector unsigned short)__a,
+                            (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector __bool short __a, __vector __bool short __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezhs((__vector unsigned short)__a,
+                                 (__vector unsigned short)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpne_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezhs(__a, __b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_cmpne_or_0_idx_cc(__vector signed int __a, __vector signed int __b,
+                      int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfenezfs((__vector unsigned int)__a,
+                            (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector __bool int __a, __vector __bool int __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezfs((__vector unsigned int)__a,
+                                 (__vector unsigned int)__b, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpne_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                      int *__cc) {
+  return __builtin_s390_vfenezfs(__a, __b, __cc);
+}
+
+/*-- vec_cmprg --------------------------------------------------------------*/
+
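+/* Range compare (vstrc*): each element of __a is tested against ranges
+   whose limit values come from __b and whose per-element comparison
+   controls come from __c.  The trailing literal is the flags field:
+   bit value 4 (RT) requests a boolean mask instead of an index and bit
+   value 8 (IN) inverts the test, giving the 4/0/12/8 literals used by
+   the cmprg/cmprg_idx/cmpnrg/cmpnrg_idx variants below. */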
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg(__vector unsigned char __a, __vector unsigned char __b,
+          __vector unsigned char __c) {
+  return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg(__vector unsigned short __a, __vector unsigned short __b,
+          __vector unsigned short __c) {
+  return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg(__vector unsigned int __a, __vector unsigned int __b,
+          __vector unsigned int __c) {
+  return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 4);
+}
+
+/*-- vec_cmprg_cc -----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmprg_cc(__vector unsigned char __a, __vector unsigned char __b,
+             __vector unsigned char __c, int *__cc) {
+  return (__vector __bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmprg_cc(__vector unsigned short __a, __vector unsigned short __b,
+             __vector unsigned short __c, int *__cc) {
+  return (__vector __bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmprg_cc(__vector unsigned int __a, __vector unsigned int __b,
+             __vector unsigned int __c, int *__cc) {
+  return (__vector __bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc);
+}
+
+/*-- vec_cmprg_idx ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c) {
+  return __builtin_s390_vstrcb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx(__vector unsigned short __a, __vector unsigned short __b,
+              __vector unsigned short __c) {
+  return __builtin_s390_vstrch(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx(__vector unsigned int __a, __vector unsigned int __b,
+              __vector unsigned int __c) {
+  return __builtin_s390_vstrcf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_idx_cc -------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                 __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                 __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                 __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+                   __vector unsigned char __c) {
+  return __builtin_s390_vstrczb(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+                   __vector unsigned short __c) {
+  return __builtin_s390_vstrczh(__a, __b, __c, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+                   __vector unsigned int __c) {
+  return __builtin_s390_vstrczf(__a, __b, __c, 0);
+}
+
+/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmprg_or_0_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                      __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmprg_or_0_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                      __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmprg_or_0_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                      __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc);
+}
+
+/*-- vec_cmpnrg -------------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg(__vector unsigned char __a, __vector unsigned char __b,
+           __vector unsigned char __c) {
+  return (__vector __bool char)__builtin_s390_vstrcb(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg(__vector unsigned short __a, __vector unsigned short __b,
+           __vector unsigned short __c) {
+  return (__vector __bool short)__builtin_s390_vstrch(__a, __b, __c, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg(__vector unsigned int __a, __vector unsigned int __b,
+           __vector unsigned int __c) {
+  return (__vector __bool int)__builtin_s390_vstrcf(__a, __b, __c, 12);
+}
+
+/*-- vec_cmpnrg_cc ----------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_cmpnrg_cc(__vector unsigned char __a, __vector unsigned char __b,
+              __vector unsigned char __c, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vstrcbs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_cmpnrg_cc(__vector unsigned short __a, __vector unsigned short __b,
+              __vector unsigned short __c, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vstrchs(__a, __b, __c, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_cmpnrg_cc(__vector unsigned int __a, __vector unsigned int __b,
+              __vector unsigned int __c, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vstrcfs(__a, __b, __c, 12, __cc);
+}
+
+/*-- vec_cmpnrg_idx ---------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx(__vector unsigned char __a, __vector unsigned char __b,
+               __vector unsigned char __c) {
+  return __builtin_s390_vstrcb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx(__vector unsigned short __a, __vector unsigned short __b,
+               __vector unsigned short __c) {
+  return __builtin_s390_vstrch(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx(__vector unsigned int __a, __vector unsigned int __b,
+               __vector unsigned int __c) {
+  return __builtin_s390_vstrcf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_idx_cc(__vector unsigned char __a, __vector unsigned char __b,
+                  __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_idx_cc(__vector unsigned short __a, __vector unsigned short __b,
+                  __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_idx_cc(__vector unsigned int __a, __vector unsigned int __b,
+                  __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx(__vector unsigned char __a, __vector unsigned char __b,
+                    __vector unsigned char __c) {
+  return __builtin_s390_vstrczb(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx(__vector unsigned short __a, __vector unsigned short __b,
+                    __vector unsigned short __c) {
+  return __builtin_s390_vstrczh(__a, __b, __c, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx(__vector unsigned int __a, __vector unsigned int __b,
+                    __vector unsigned int __c) {
+  return __builtin_s390_vstrczf(__a, __b, __c, 8);
+}
+
+/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_cmpnrg_or_0_idx_cc(__vector unsigned char __a,
+                       __vector unsigned char __b,
+                       __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_cmpnrg_or_0_idx_cc(__vector unsigned short __a,
+                       __vector unsigned short __b,
+                       __vector unsigned short __c, int *__cc) {
+  return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_cmpnrg_or_0_idx_cc(__vector unsigned int __a,
+                       __vector unsigned int __b,
+                       __vector unsigned int __c, int *__cc) {
+  return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc);
+}
+
+/*-- vec_find_any_eq --------------------------------------------------------*/
+
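+/* "Find any equal" (vfae*): every element of __a is compared against
+   all elements of __b.  The same 4/12/0/8 flag scheme as in the range
+   compares applies, and the vfaez* builtins behind the or_0 variants
+   additionally match zero elements. */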
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 4);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 4);
+}
+
+/*-- vec_find_any_eq_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector signed char __a, __vector signed char __b,
+                   int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector __bool char __a, __vector __bool char __b,
+                   int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_eq_cc(__vector unsigned char __a, __vector unsigned char __b,
+                   int *__cc) {
+  return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector signed short __a, __vector signed short __b,
+                   int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector __bool short __a, __vector __bool short __b,
+                   int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_eq_cc(__vector unsigned short __a, __vector unsigned short __b,
+                   int *__cc) {
+  return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector signed int __a, __vector signed int __b,
+                   int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector __bool int __a, __vector __bool int __b,
+                   int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 4, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_eq_cc(__vector unsigned int __a, __vector unsigned int __b,
+                   int *__cc) {
+  return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc);
+}
+
+/*-- vec_find_any_eq_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfaeb((__vector unsigned char)__a,
+                              (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfaeb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfaeh((__vector unsigned short)__a,
+                              (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfaeh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfaef((__vector unsigned int)__a,
+                              (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfaef(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_idx_cc(__vector signed char __a,
+                       __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector __bool char __a,
+                       __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaebs((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_idx_cc(__vector unsigned char __a,
+                       __vector unsigned char __b, int *__cc) {
+  return __builtin_s390_vfaebs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_idx_cc(__vector signed short __a,
+                       __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector __bool short __a,
+                       __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaehs((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_idx_cc(__vector unsigned short __a,
+                       __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaehs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_idx_cc(__vector signed int __a,
+                       __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector __bool int __a,
+                       __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaefs((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_idx_cc(__vector unsigned int __a,
+                       __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaefs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_eq_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx(__vector signed char __a,
+                         __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaezb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector __bool char __a,
+                         __vector __bool char __b) {
+  return __builtin_s390_vfaezb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx(__vector unsigned char __a,
+                         __vector unsigned char __b) {
+  return __builtin_s390_vfaezb(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx(__vector signed short __a,
+                         __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaezh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector __bool short __a,
+                         __vector __bool short __b) {
+  return __builtin_s390_vfaezh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx(__vector unsigned short __a,
+                         __vector unsigned short __b) {
+  return __builtin_s390_vfaezh(__a, __b, 0);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx(__vector signed int __a,
+                         __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaezf((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector __bool int __a,
+                         __vector __bool int __b) {
+  return __builtin_s390_vfaezf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 0);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx(__vector unsigned int __a,
+                         __vector unsigned int __b) {
+  return __builtin_s390_vfaezf(__a, __b, 0);
+}
+
+/*-- vec_find_any_eq_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_eq_or_0_idx_cc(__vector signed char __a,
+                            __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector __bool char __a,
+                            __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_eq_or_0_idx_cc(__vector unsigned char __a,
+                            __vector unsigned char __b, int *__cc) {
+  return __builtin_s390_vfaezbs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_eq_or_0_idx_cc(__vector signed short __a,
+                            __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector __bool short __a,
+                            __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_eq_or_0_idx_cc(__vector unsigned short __a,
+                            __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaezhs(__a, __b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_eq_or_0_idx_cc(__vector signed int __a,
+                            __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector __bool int __a,
+                            __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, 0, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_eq_or_0_idx_cc(__vector unsigned int __a,
+                            __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaezfs(__a, __b, 0, __cc);
+}
+
+/*-- vec_find_any_ne --------------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector signed char __a, __vector signed char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector __bool char __a, __vector __bool char __b) {
+  return (__vector __bool char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
+  return (__vector __bool char)__builtin_s390_vfaeb(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector signed short __a, __vector signed short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector __bool short __a, __vector __bool short __b) {
+  return (__vector __bool short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
+  return (__vector __bool short)__builtin_s390_vfaeh(__a, __b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector signed int __a, __vector signed int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector __bool int __a, __vector __bool int __b) {
+  return (__vector __bool int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 12);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
+  return (__vector __bool int)__builtin_s390_vfaef(__a, __b, 12);
+}
+
+/*-- vec_find_any_ne_cc -----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector signed char __a,
+                   __vector signed char __b, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector __bool char __a,
+                   __vector __bool char __b, int *__cc) {
+  return (__vector __bool char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool char
+vec_find_any_ne_cc(__vector unsigned char __a,
+                   __vector unsigned char __b, int *__cc) {
+  return (__vector __bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector signed short __a,
+                   __vector signed short __b, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector __bool short __a,
+                   __vector __bool short __b, int *__cc) {
+  return (__vector __bool short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool short
+vec_find_any_ne_cc(__vector unsigned short __a,
+                   __vector unsigned short __b, int *__cc) {
+  return (__vector __bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector signed int __a,
+                   __vector signed int __b, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector __bool int __a,
+                   __vector __bool int __b, int *__cc) {
+  return (__vector __bool int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 12, __cc);
+}
+
+static inline __ATTRS_o_ai __vector __bool int
+vec_find_any_ne_cc(__vector unsigned int __a,
+                   __vector unsigned int __b, int *__cc) {
+  return (__vector __bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc);
+}
+
+/*-- vec_find_any_ne_idx ----------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx(__vector signed char __a, __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaeb((__vector unsigned char)__a,
+                         (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector __bool char __a, __vector __bool char __b) {
+  return __builtin_s390_vfaeb((__vector unsigned char)__a,
+                              (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx(__vector unsigned char __a, __vector unsigned char __b) {
+  return __builtin_s390_vfaeb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx(__vector signed short __a, __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaeh((__vector unsigned short)__a,
+                         (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector __bool short __a, __vector __bool short __b) {
+  return __builtin_s390_vfaeh((__vector unsigned short)__a,
+                              (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx(__vector unsigned short __a, __vector unsigned short __b) {
+  return __builtin_s390_vfaeh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx(__vector signed int __a, __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaef((__vector unsigned int)__a,
+                         (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector __bool int __a, __vector __bool int __b) {
+  return __builtin_s390_vfaef((__vector unsigned int)__a,
+                              (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx(__vector unsigned int __a, __vector unsigned int __b) {
+  return __builtin_s390_vfaef(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_idx_cc(__vector signed char __a,
+                       __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaebs((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector __bool char __a,
+                       __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaebs((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_idx_cc(__vector unsigned char __a,
+                       __vector unsigned char __b,
+                       int *__cc) {
+  return __builtin_s390_vfaebs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_idx_cc(__vector signed short __a,
+                       __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaehs((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector __bool short __a,
+                       __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaehs((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_idx_cc(__vector unsigned short __a,
+                       __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaehs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_idx_cc(__vector signed int __a,
+                       __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaefs((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector __bool int __a,
+                       __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaefs((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_idx_cc(__vector unsigned int __a,
+                       __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaefs(__a, __b, 8, __cc);
+}
+
+/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx(__vector signed char __a,
+                         __vector signed char __b) {
+  return (__vector signed char)
+    __builtin_s390_vfaezb((__vector unsigned char)__a,
+                          (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector __bool char __a,
+                         __vector __bool char __b) {
+  return __builtin_s390_vfaezb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx(__vector unsigned char __a,
+                         __vector unsigned char __b) {
+  return __builtin_s390_vfaezb(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx(__vector signed short __a,
+                         __vector signed short __b) {
+  return (__vector signed short)
+    __builtin_s390_vfaezh((__vector unsigned short)__a,
+                          (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector __bool short __a,
+                         __vector __bool short __b) {
+  return __builtin_s390_vfaezh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx(__vector unsigned short __a,
+                         __vector unsigned short __b) {
+  return __builtin_s390_vfaezh(__a, __b, 8);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx(__vector signed int __a,
+                         __vector signed int __b) {
+  return (__vector signed int)
+    __builtin_s390_vfaezf((__vector unsigned int)__a,
+                          (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector __bool int __a,
+                         __vector __bool int __b) {
+  return __builtin_s390_vfaezf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, 8);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx(__vector unsigned int __a,
+                         __vector unsigned int __b) {
+  return __builtin_s390_vfaezf(__a, __b, 8);
+}
+
+/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/
+
+static inline __ATTRS_o_ai __vector signed char
+vec_find_any_ne_or_0_idx_cc(__vector signed char __a,
+                            __vector signed char __b, int *__cc) {
+  return (__vector signed char)
+    __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                           (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector __bool char __a,
+                            __vector __bool char __b, int *__cc) {
+  return __builtin_s390_vfaezbs((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_find_any_ne_or_0_idx_cc(__vector unsigned char __a,
+                            __vector unsigned char __b, int *__cc) {
+  return __builtin_s390_vfaezbs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed short
+vec_find_any_ne_or_0_idx_cc(__vector signed short __a,
+                            __vector signed short __b, int *__cc) {
+  return (__vector signed short)
+    __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                           (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector __bool short __a,
+                            __vector __bool short __b, int *__cc) {
+  return __builtin_s390_vfaezhs((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned short
+vec_find_any_ne_or_0_idx_cc(__vector unsigned short __a,
+                            __vector unsigned short __b, int *__cc) {
+  return __builtin_s390_vfaezhs(__a, __b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector signed int
+vec_find_any_ne_or_0_idx_cc(__vector signed int __a,
+                            __vector signed int __b, int *__cc) {
+  return (__vector signed int)
+    __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                           (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector __bool int __a,
+                            __vector __bool int __b, int *__cc) {
+  return __builtin_s390_vfaezfs((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, 8, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned int
+vec_find_any_ne_or_0_idx_cc(__vector unsigned int __a,
+                            __vector unsigned int __b, int *__cc) {
+  return __builtin_s390_vfaezfs(__a, __b, 8, __cc);
+}
+
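+/* Illustrative only (not part of the upstream header): a minimal usage
+ * sketch of the vec_find_any_ne_idx family, assuming a -mzvector target.
+ *
+ *   __vector unsigned char needle = vec_splats((unsigned char)' ');
+ *   __vector unsigned char text = ...;
+ *   __vector unsigned char idx = vec_find_any_ne_idx(text, needle);
+ *   // Byte element 7 of idx holds the byte index of the first element of
+ *   // text that differs from every element of needle, or 16 if none does.
+ */
+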
+/*-- vec_search_string_cc ---------------------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed char __a, __vector signed char __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool char __a, __vector __bool char __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsb((__vector unsigned char)__a,
+                               (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned char __a, __vector unsigned char __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed short __a, __vector signed short __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool short __a, __vector __bool short __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsh((__vector unsigned short)__a,
+                               (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned short __a, __vector unsigned short __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector signed int __a, __vector signed int __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector __bool int __a, __vector __bool int __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsf((__vector unsigned int)__a,
+                               (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_cc(__vector unsigned int __a, __vector unsigned int __b,
+                     __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrsf(__a, __b, __c, __cc);
+}
+
+#endif
+
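+/* Illustrative only (not part of the upstream header): a hedged sketch of
+ * vec_search_string_cc, assuming an arch13 target.  __b is the needle,
+ * byte element 7 of __c gives its length in bytes, byte element 7 of the
+ * result holds the start index of the match within __a (16 if there is no
+ * match), and *__cc receives the condition code distinguishing no, partial,
+ * and full matches (see the VSTRS definition for the exact encoding).
+ */
+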
+/*-- vec_search_string_until_zero_cc ----------------------------------------*/
+
+#if __ARCH__ >= 13
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed char __a,
+                                __vector signed char __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszb((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool char __a,
+                                __vector __bool char __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszb((__vector unsigned char)__a,
+                                (__vector unsigned char)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned char __a,
+                                __vector unsigned char __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszb(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed short __a,
+                                __vector signed short __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszh((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool short __a,
+                                __vector __bool short __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszh((__vector unsigned short)__a,
+                                (__vector unsigned short)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned short __a,
+                                __vector unsigned short __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszh(__a, __b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector signed int __a,
+                                __vector signed int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector __bool int __a,
+                                __vector __bool int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf((__vector unsigned int)__a,
+                                (__vector unsigned int)__b, __c, __cc);
+}
+
+static inline __ATTRS_o_ai __vector unsigned char
+vec_search_string_until_zero_cc(__vector unsigned int __a,
+                                __vector unsigned int __b,
+                                __vector unsigned char __c, int *__cc) {
+  return __builtin_s390_vstrszf(__a, __b, __c, __cc);
+}
+
+#endif
+
+#undef __constant_pow2_range
+#undef __constant_range
+#undef __constant
+#undef __ATTRS_o
+#undef __ATTRS_o_ai
+#undef __ATTRS_ai
+
+#else
+
+#error "Use -fzvector to enable vector extensions"
+
+#endif
diff --git a/linux-x86/lib64/clang/11.0.1/include/vpclmulqdqintrin.h b/linux-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/vpclmulqdqintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/waitpkgintrin.h b/linux-x86/lib64/clang/11.0.5/include/waitpkgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/waitpkgintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/waitpkgintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/wasm_simd128.h b/linux-x86/lib64/clang/11.0.5/include/wasm_simd128.h
new file mode 100644
index 0000000..b781238
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/wasm_simd128.h
@@ -0,0 +1,1133 @@
+/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __WASM_SIMD128_H
+#define __WASM_SIMD128_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+// User-facing type
+typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
+
+// Internal types determined by clang builtin definitions
+typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef signed char __s8x16
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned char __u8x16
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned short __u16x8
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned int __u32x4
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef unsigned long long __u64x2
+    __attribute__((__vector_size__(16), __aligned__(16)));
+typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
+typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
+
+#define __DEFAULT_FN_ATTRS                                                     \
+  __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
+                 __min_vector_width__(128)))
+
+#define __REQUIRE_CONSTANT(e)                                                  \
+  _Static_assert(__builtin_constant_p(e), "Expected constant")
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
+  // UB-free unaligned access copied from xmmintrin.h
+  struct __wasm_v128_load_struct {
+    __v128_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  return ((const struct __wasm_v128_load_struct *)__mem)->__v;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v8x16_load_splat(const void *__mem) {
+  struct __wasm_v8x16_load_splat_struct {
+    uint8_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
+                           __v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v16x8_load_splat(const void *__mem) {
+  struct __wasm_v16x8_load_splat_struct {
+    uint16_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v32x4_load_splat(const void *__mem) {
+  struct __wasm_v32x4_load_splat_struct {
+    uint32_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u32x4){__v, __v, __v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v64x2_load_splat(const void *__mem) {
+  struct __wasm_v64x2_load_splat_struct {
+    uint64_t __v;
+  } __attribute__((__packed__, __may_alias__));
+  uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
+  return (v128_t)(__u64x2){__v, __v};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_load_8x8(const void *__mem) {
+  typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_i16x8_load_8x8_struct {
+    __i8x8 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __i16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_load_8x8(const void *__mem) {
+  typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_u16x8_load_8x8_struct {
+    __u8x8 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __u16x8);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_load_16x4(const void *__mem) {
+  typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_i32x4_load_16x4_struct {
+    __i16x4 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __i32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_load_16x4(const void *__mem) {
+  typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_u32x4_load_16x4_struct {
+    __u16x4 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __u32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_load_32x2(const void *__mem) {
+  typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_i64x2_load_32x2_struct {
+    __i32x2 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_load_32x2(const void *__mem) {
+  typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+  struct __wasm_u64x2_load_32x2_struct {
+    __u32x2 __v;
+  } __attribute__((__packed__, __may_alias__));
+  __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
+  return (v128_t) __builtin_convertvector(__v, __u64x2);
+}
+
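+/* Illustrative only (not part of the upstream header): the *_load_NxM
+ * helpers read a 64-bit half vector and widen each lane, e.g.
+ * wasm_i16x8_load_8x8 loads eight int8_t values and sign-extends each to a
+ * 16-bit lane; the u-prefixed forms zero-extend instead.
+ */
+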
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
+                                                          v128_t __a) {
+  // UB-free unaligned access copied from xmmintrin.h
+  struct __wasm_v128_store_struct {
+    __v128_u __v;
+  } __attribute__((__packed__, __may_alias__));
+  ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
+}
+
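+/* Illustrative only (not part of the upstream header): because the loads
+ * and stores above go through a packed, may_alias struct, they are well
+ * defined on unaligned and type-punned pointers, e.g.
+ *
+ *   uint8_t buf[32];
+ *   v128_t v = wasm_v128_load(buf + 1);   // unaligned load is fine
+ *   wasm_v128_store(buf + 1, v);          // unaligned store is fine
+ */
+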
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
+                int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
+                int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
+                int8_t __c14, int8_t __c15) {
+  return (v128_t)(__i8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
+                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
+                           __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
+                int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
+  return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
+                                                            int32_t __c1,
+                                                            int32_t __c2,
+                                                            int32_t __c3) {
+  return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
+                                                            float __c1,
+                                                            float __c2,
+                                                            float __c3) {
+  return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
+                                                            int64_t __c1) {
+  return (v128_t)(__i64x2){__c0, __c1};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
+                                                            double __c1) {
+  return (v128_t)(__f64x2){__c0, __c1};
+}
+
+#define wasm_i8x16_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, __c8, \
+                         __c9, __c10, __c11, __c12, __c13, __c14, __c15)       \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    __REQUIRE_CONSTANT(__c4);                                                  \
+    __REQUIRE_CONSTANT(__c5);                                                  \
+    __REQUIRE_CONSTANT(__c6);                                                  \
+    __REQUIRE_CONSTANT(__c7);                                                  \
+    __REQUIRE_CONSTANT(__c8);                                                  \
+    __REQUIRE_CONSTANT(__c9);                                                  \
+    __REQUIRE_CONSTANT(__c10);                                                 \
+    __REQUIRE_CONSTANT(__c11);                                                 \
+    __REQUIRE_CONSTANT(__c12);                                                 \
+    __REQUIRE_CONSTANT(__c13);                                                 \
+    __REQUIRE_CONSTANT(__c14);                                                 \
+    __REQUIRE_CONSTANT(__c15);                                                 \
+    (v128_t)(__i8x16){__c0, __c1, __c2,  __c3,  __c4,  __c5,  __c6,  __c7,     \
+                      __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15};   \
+  })
+
+#define wasm_i16x8_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)       \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    __REQUIRE_CONSTANT(__c4);                                                  \
+    __REQUIRE_CONSTANT(__c5);                                                  \
+    __REQUIRE_CONSTANT(__c6);                                                  \
+    __REQUIRE_CONSTANT(__c7);                                                  \
+    (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};         \
+  })
+
+#define wasm_i32x4_const(__c0, __c1, __c2, __c3)                               \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    (v128_t)(__i32x4){__c0, __c1, __c2, __c3};                                 \
+  })
+
+#define wasm_f32x4_const(__c0, __c1, __c2, __c3)                               \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    __REQUIRE_CONSTANT(__c2);                                                  \
+    __REQUIRE_CONSTANT(__c3);                                                  \
+    (v128_t)(__f32x4){__c0, __c1, __c2, __c3};                                 \
+  })
+
+#define wasm_i64x2_const(__c0, __c1)                                           \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    (v128_t)(__i64x2){__c0, __c1};                                             \
+  })
+
+#define wasm_f64x2_const(__c0, __c1)                                           \
+  __extension__({                                                              \
+    __REQUIRE_CONSTANT(__c0);                                                  \
+    __REQUIRE_CONSTANT(__c1);                                                  \
+    (v128_t)(__f64x2){__c0, __c1};                                             \
+  })
+
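+/* Illustrative only (not part of the upstream header): unlike the *_make
+ * functions, the *_const macros require compile-time constants, e.g.
+ *
+ *   v128_t ones = wasm_i32x4_const(1, 1, 1, 1);   // OK
+ *   v128_t vals = wasm_i32x4_make(x, y, 0, 0);    // OK for runtime x, y
+ *   // wasm_i32x4_const(x, 0, 0, 0) fails __REQUIRE_CONSTANT unless x is a
+ *   // compile-time constant.
+ */
+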
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
+  return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
+                           __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i8x16_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
+
+#define wasm_u8x16_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+
+#define wasm_i8x16_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
+  return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+#define wasm_i16x8_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
+
+#define wasm_u16x8_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+
+#define wasm_i16x8_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
+  return (v128_t)(__i32x4){__a, __a, __a, __a};
+}
+
+#define wasm_i32x4_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_i32x4((__i32x4)(__a), __i))
+
+#define wasm_i32x4_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i32x4((__i32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
+  return (v128_t)(__i64x2){__a, __a};
+}
+
+#define wasm_i64x2_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_i64x2((__i64x2)(__a), __i))
+
+#define wasm_i64x2_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_i64x2((__i64x2)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
+  return (v128_t)(__f32x4){__a, __a, __a, __a};
+}
+
+#define wasm_f32x4_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_f32x4((__f32x4)(__a), __i))
+
+#define wasm_f32x4_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_f32x4((__f32x4)(__a), __i, __b))
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
+  return (v128_t)(__f64x2){__a, __a};
+}
+
+#define wasm_f64x2_extract_lane(__a, __i)                                      \
+  (__builtin_wasm_extract_lane_f64x2((__f64x2)(__a), __i))
+
+#define wasm_f64x2_replace_lane(__a, __i, __b)                                 \
+  ((v128_t)__builtin_wasm_replace_lane_f64x2((__f64x2)(__a), __i, __b))
+
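+/* Illustrative only (not part of the upstream header): the lane accessors
+ * take a constant lane index, e.g.
+ *
+ *   v128_t v = wasm_i32x4_splat(7);
+ *   int32_t x = wasm_i32x4_extract_lane(v, 2);   // x == 7
+ *   v = wasm_i32x4_replace_lane(v, 0, 42);       // lane 0 becomes 42
+ */
+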
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a == (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a != (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a < (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a < (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a > (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a > (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a <= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u8x16)__a >= (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a == (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a != (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a < (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a < (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a > (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a > (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a <= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a <= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i16x8)__a >= (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u16x8)__a >= (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a == (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a != (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a < (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a < (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a > (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a > (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a <= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a <= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__i32x4)__a >= (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__u32x4)__a >= (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a == (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a != (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a < (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a > (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a <= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f32x4)__a >= (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a == (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a != (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a < (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a > (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a <= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,
+                                                          v128_t __b) {
+  return (v128_t)((__f64x2)__a >= (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {
+  return ~__a;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,
+                                                          v128_t __b) {
+  return __a & __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,
+                                                         v128_t __b) {
+  return __a | __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,
+                                                          v128_t __b) {
+  return __a ^ __b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
+                                                             v128_t __b) {
+  return __a & ~__b;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
+                                                                v128_t __b,
+                                                                v128_t __mask) {
+  return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,
+                                          (__i32x4)__mask);
+}
+
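+/* Illustrative only (not part of the upstream header): bitselect keeps the
+ * bits of __a where __mask is 1 and the bits of __b where it is 0, so it
+ * combines naturally with the all-ones/all-zeros lanes that the comparisons
+ * above produce, e.g. a lane-wise signed max:
+ *
+ *   v128_t m = wasm_i32x4_gt(a, b);
+ *   v128_t mx = wasm_v128_bitselect(a, b, m);   // max(a, b) per lane
+ */
+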
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
+  return (v128_t)(-(__u8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i8x16((__i8x16)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i8x16((__i8x16)__a);
+}
+
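+/* Illustrative only (not part of the upstream header): any_true/all_true
+ * reduce a vector comparison to a scalar bool, e.g.
+ *
+ *   bool equal = wasm_i8x16_all_true(wasm_i8x16_eq(x, y));
+ */
+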
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i8x16)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__s8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u8x16)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u8x16)__a + (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
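+/* Illustrative only (not part of the upstream header): the saturating forms
+ * clamp instead of wrapping.  On byte lanes holding 250 and 10,
+ * wasm_u8x16_add_saturate yields 255, while wasm_i8x16_add wraps to 4.
+ */
+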
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u8x16)__a - (__u8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
+                                                     (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
+  return (v128_t)(-(__u16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i16x8)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u16x8)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u16x8)__a + (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__i16x8)__a - (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u16x8)__a * (__u16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
+  return (v128_t)(-(__u32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i32x4((__i32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i32x4)__a << __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u32x4)__a >> __b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u32x4)__a + (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u32x4)__a - (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u32x4)__a * (__u32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
+  return (v128_t)(-(__u64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_any_true(v128_t __a) {
+  return __builtin_wasm_any_true_i64x2((__i64x2)__a);
+}
+
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
+  return __builtin_wasm_all_true_i64x2((__i64x2)__a);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i64x2)__a << (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__i64x2)__a >> (int64_t)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
+                                                           int32_t __b) {
+  return (v128_t)((__u64x2)__a >> (int64_t)__b);
+}
+
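+/* Illustrative only (not part of the upstream header): per the Wasm SIMD
+ * proposal, vector shift counts are interpreted modulo the lane width; the
+ * (int64_t) casts above merely widen the scalar so the C-level vector shift
+ * is well typed.
+ */
+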
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u64x2)__a + (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u64x2)__a - (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__u64x2)__a * (__u64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {
+  return (v128_t)(-(__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
+  return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfma(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfma_f32x4((__f32x4)__a, (__f32x4)__b,
+                                           (__f32x4)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfms(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfms_f32x4((__f32x4)__a, (__f32x4)__b,
+                                           (__f32x4)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a + (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a - (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a * (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f32x4)__a / (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
+}
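+
+// min/max follow the WebAssembly lane-wise semantics (NaN in either input
+// propagates to the result), while pmin/pmax are the "pseudo" forms defined
+// as b < a ? b : a and a < b ? b : a respectively, which return the first
+// operand when a comparison with NaN fails. A minimal illustrative sketch,
+// not part of the upstream header: clamping each lane of __v to [__lo, __hi]
+// with the pseudo forms passes NaN lanes of __v through unclamped.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_clamp_example(v128_t __v, v128_t __lo, v128_t __hi) {
+  return wasm_f32x4_pmin(wasm_f32x4_pmax(__v, __lo), __hi);
+}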
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
+  return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {
+  return (v128_t)(-(__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
+  return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
+}
+
+#ifdef __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfma(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfma_f64x2((__f64x2)__a, (__f64x2)__b,
+                                           (__f64x2)__c);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfms(v128_t __a,
+                                                            v128_t __b,
+                                                            v128_t __c) {
+  return (v128_t)__builtin_wasm_qfms_f64x2((__f64x2)__a, (__f64x2)__b,
+                                           (__f64x2)__c);
+}
+
+#endif // __wasm_unimplemented_simd128__
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a + (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a - (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a * (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)((__f64x2)__a / (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
+                                                           v128_t __b) {
+  return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
+                                                            v128_t __b) {
+  return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+  return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_i32x4(v128_t __a) {
+  return (v128_t)__builtin_convertvector((__i32x4)__a, __f32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_convert_u32x4(v128_t __a) {
+  return (v128_t)__builtin_convertvector((__u32x4)__a, __f32x4);
+}
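+
+// The trunc_saturate conversions truncate toward zero and saturate instead of
+// trapping: out-of-range lanes clamp to INT32_MIN/INT32_MAX (or 0/UINT32_MAX
+// for the unsigned form) and NaN lanes become 0. An illustrative sketch, not
+// part of the upstream header, assuming wasm_f32x4_splat is defined earlier
+// in this file: convert float lanes to Q24.8 fixed point.
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_to_q24_8_example(v128_t __a) {
+  return wasm_i32x4_trunc_saturate_f32x4(
+      wasm_f32x4_mul(__a, wasm_f32x4_splat(256.0f)));
+}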
+
+#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
+                           __c14, __c15)                                       \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5,      \
+      __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
+
+#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+                           __c7)                                               \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2,        \
+      (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2,  \
+      (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2,  \
+      (__c7)*2 + 1))
+
+#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2,    \
+      (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3,        \
+      (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4,            \
+      (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
+
+#define wasm_v64x2_shuffle(__a, __b, __c0, __c1)                               \
+  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
+      (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2,    \
+      (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7,    \
+      (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4,        \
+      (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
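+
+// The wider shuffles are expressed as byte shuffles: each lane index __cN
+// expands to its constituent byte indices (lane __c0 of a v32x4 shuffle
+// becomes bytes __c0*4 .. __c0*4 + 3). Indices 0..N-1 select lanes of __a and
+// N..2N-1 select lanes of __b, so interleaving the low halves of two i32x4
+// vectors looks like this (an illustrative sketch, not part of the upstream
+// header):
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v32x4_zip_low_example(v128_t __a, v128_t __b) {
+  return wasm_v32x4_shuffle(__a, __b, 0, 4, 1, 5); // a0, b0, a1, b1
+}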
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
+                                                               v128_t __b) {
+  return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
+}
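+
+// Unlike the shuffle macros above, whose lane indices must be compile-time
+// constants, swizzle selects bytes of __a using the runtime byte indices in
+// __b; an index of 16 or greater yields 0 in that lane.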
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
+                                                     (__i16x8)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,
+                                                     (__i32x4)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
+  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
+                                                     (__i32x4)__b);
+}
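+
+// Narrowing saturates each source lane to the destination range rather than
+// truncating bits: an i16 lane of 300 becomes 255 under u8x16_narrow and 127
+// under i8x16_narrow, and a negative lane becomes 0 under u8x16_narrow.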
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_i8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_i8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_low_u8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_widen_high_u8x16(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_i16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_i16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_low_u16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_widen_high_u16x8(v128_t __a) {
+  return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+}
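+
+// widen_low/widen_high sign- or zero-extend the low and high half of the
+// source vector. Narrowing the pair back together inverts the split for
+// in-range lanes, as in this illustrative sketch (not part of the upstream
+// header):
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_widen_roundtrip_example(v128_t __a) {
+  return wasm_i8x16_narrow_i16x8(wasm_i16x8_widen_low_i8x16(__a),
+                                 wasm_i16x8_widen_high_i8x16(__a));
+}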
+
+// Undefine helper macros
+#undef __DEFAULT_FN_ATTRS
+
+#endif // __WASM_SIMD128_H
diff --git a/linux-x86/lib64/clang/11.0.1/include/wbnoinvdintrin.h b/linux-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/wbnoinvdintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/wmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/wmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/wmmintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/wmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/x86intrin.h b/linux-x86/lib64/clang/11.0.5/include/x86intrin.h
new file mode 100644
index 0000000..768d0e5
--- /dev/null
+++ b/linux-x86/lib64/clang/11.0.5/include/x86intrin.h
@@ -0,0 +1,67 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <ia32intrin.h>
+
+#include <immintrin.h>
+
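+/* Each extension header below is included when its feature macro
+ * (__3dNOW__, __PRFCHW__, ...) is defined, and also unconditionally in
+ * modules builds and outside the _MSC_VER/__SCE__ compatibility modes. */
+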
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__3dNOW__)
+#include <mm3dnow.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__PRFCHW__)
+#include <prfchwintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__SSE4A__)
+#include <ammintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__FMA4__)
+#include <fma4intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__XOP__)
+#include <xopintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__TBM__)
+#include <tbmintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__LWP__)
+#include <lwpintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__MWAITX__)
+#include <mwaitxintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__CLZERO__)
+#include <clzerointrin.h>
+#endif
+
+
+#endif /* __X86INTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.1/include/xmmintrin.h b/linux-x86/lib64/clang/11.0.5/include/xmmintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/11.0.1/include/xmmintrin.h
copy to linux-x86/lib64/clang/11.0.5/include/xmmintrin.h
index 9b8de63..f468669 100644
--- a/darwin-x86/lib64/clang/11.0.1/include/xmmintrin.h
+++ b/linux-x86/lib64/clang/11.0.5/include/xmmintrin.h
@@ -2931,31 +2931,31 @@
 
 #define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
 
-#define _MM_EXCEPT_INVALID    (0x0001)
-#define _MM_EXCEPT_DENORM     (0x0002)
-#define _MM_EXCEPT_DIV_ZERO   (0x0004)
-#define _MM_EXCEPT_OVERFLOW   (0x0008)
-#define _MM_EXCEPT_UNDERFLOW  (0x0010)
-#define _MM_EXCEPT_INEXACT    (0x0020)
-#define _MM_EXCEPT_MASK       (0x003f)
+#define _MM_EXCEPT_INVALID    (0x0001U)
+#define _MM_EXCEPT_DENORM     (0x0002U)
+#define _MM_EXCEPT_DIV_ZERO   (0x0004U)
+#define _MM_EXCEPT_OVERFLOW   (0x0008U)
+#define _MM_EXCEPT_UNDERFLOW  (0x0010U)
+#define _MM_EXCEPT_INEXACT    (0x0020U)
+#define _MM_EXCEPT_MASK       (0x003fU)
 
-#define _MM_MASK_INVALID      (0x0080)
-#define _MM_MASK_DENORM       (0x0100)
-#define _MM_MASK_DIV_ZERO     (0x0200)
-#define _MM_MASK_OVERFLOW     (0x0400)
-#define _MM_MASK_UNDERFLOW    (0x0800)
-#define _MM_MASK_INEXACT      (0x1000)
-#define _MM_MASK_MASK         (0x1f80)
+#define _MM_MASK_INVALID      (0x0080U)
+#define _MM_MASK_DENORM       (0x0100U)
+#define _MM_MASK_DIV_ZERO     (0x0200U)
+#define _MM_MASK_OVERFLOW     (0x0400U)
+#define _MM_MASK_UNDERFLOW    (0x0800U)
+#define _MM_MASK_INEXACT      (0x1000U)
+#define _MM_MASK_MASK         (0x1f80U)
 
-#define _MM_ROUND_NEAREST     (0x0000)
-#define _MM_ROUND_DOWN        (0x2000)
-#define _MM_ROUND_UP          (0x4000)
-#define _MM_ROUND_TOWARD_ZERO (0x6000)
-#define _MM_ROUND_MASK        (0x6000)
+#define _MM_ROUND_NEAREST     (0x0000U)
+#define _MM_ROUND_DOWN        (0x2000U)
+#define _MM_ROUND_UP          (0x4000U)
+#define _MM_ROUND_TOWARD_ZERO (0x6000U)
+#define _MM_ROUND_MASK        (0x6000U)
 
-#define _MM_FLUSH_ZERO_MASK   (0x8000)
-#define _MM_FLUSH_ZERO_ON     (0x8000)
-#define _MM_FLUSH_ZERO_OFF    (0x0000)
+#define _MM_FLUSH_ZERO_MASK   (0x8000U)
+#define _MM_FLUSH_ZERO_ON     (0x8000U)
+#define _MM_FLUSH_ZERO_OFF    (0x0000U)
 
 #define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
 #define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
diff --git a/linux-x86/lib64/clang/11.0.1/include/xopintrin.h b/linux-x86/lib64/clang/11.0.5/include/xopintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/xopintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/xopintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/xsavecintrin.h b/linux-x86/lib64/clang/11.0.5/include/xsavecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/xsavecintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/xsavecintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/xsaveintrin.h b/linux-x86/lib64/clang/11.0.5/include/xsaveintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/xsaveintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/xsaveintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/xsaveoptintrin.h b/linux-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/xsaveoptintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/xsavesintrin.h b/linux-x86/lib64/clang/11.0.5/include/xsavesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/xsavesintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/xsavesintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/include/xtestintrin.h b/linux-x86/lib64/clang/11.0.5/include/xtestintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/include/xtestintrin.h
rename to linux-x86/lib64/clang/11.0.5/include/xtestintrin.h
diff --git a/linux-x86/lib64/clang/11.0.1/share/asan_blacklist.txt b/linux-x86/lib64/clang/11.0.5/share/asan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/share/asan_blacklist.txt
rename to linux-x86/lib64/clang/11.0.5/share/asan_blacklist.txt
diff --git a/linux-x86/lib64/clang/11.0.1/share/cfi_blacklist.txt b/linux-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/share/cfi_blacklist.txt
rename to linux-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt
diff --git a/linux-x86/lib64/clang/11.0.1/share/dfsan_abilist.txt b/linux-x86/lib64/clang/11.0.5/share/dfsan_abilist.txt
similarity index 99%
rename from linux-x86/lib64/clang/11.0.1/share/dfsan_abilist.txt
rename to linux-x86/lib64/clang/11.0.5/share/dfsan_abilist.txt
index fd212e6..b2021b7 100644
--- a/linux-x86/lib64/clang/11.0.1/share/dfsan_abilist.txt
+++ b/linux-x86/lib64/clang/11.0.5/share/dfsan_abilist.txt
@@ -33,7 +33,6 @@
 # glibc
 ###############################################################################
 fun:malloc=discard
-fun:realloc=discard
 fun:free=discard
 
 # Functions that return a value that depends on the input, but the output might
diff --git a/linux-x86/lib64/clang/11.0.1/share/hwasan_blacklist.txt b/linux-x86/lib64/clang/11.0.5/share/hwasan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/share/hwasan_blacklist.txt
rename to linux-x86/lib64/clang/11.0.5/share/hwasan_blacklist.txt
diff --git a/linux-x86/lib64/clang/11.0.1/share/msan_blacklist.txt b/linux-x86/lib64/clang/11.0.5/share/msan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.1/share/msan_blacklist.txt
rename to linux-x86/lib64/clang/11.0.5/share/msan_blacklist.txt
diff --git a/linux-x86/lib64/libLLVM-11git.so b/linux-x86/lib64/libLLVM-11git.so
index 622b076..938702b 100644
--- a/linux-x86/lib64/libLLVM-11git.so
+++ b/linux-x86/lib64/libLLVM-11git.so
Binary files differ
diff --git a/linux-x86/lib64/libbase.so b/linux-x86/lib64/libbase.so
index 5bf98fb..689d2c0 100755
--- a/linux-x86/lib64/libbase.so
+++ b/linux-x86/lib64/libbase.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so b/linux-x86/lib64/libc++.so
index fabe36a..fe9c13f 100755
--- a/linux-x86/lib64/libc++.so
+++ b/linux-x86/lib64/libc++.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so.1 b/linux-x86/lib64/libc++.so.1
index cd7fff5..7cc5763 100644
--- a/linux-x86/lib64/libc++.so.1
+++ b/linux-x86/lib64/libc++.so.1
Binary files differ
diff --git a/linux-x86/lib64/libclang_cxx.so.11git b/linux-x86/lib64/libclang_cxx.so.11git
index ffc43f1..ab7f681 100644
--- a/linux-x86/lib64/libclang_cxx.so.11git
+++ b/linux-x86/lib64/libclang_cxx.so.11git
Binary files differ
diff --git a/linux-x86/lib64/liblog.so b/linux-x86/lib64/liblog.so
index d39609f..ffb9a80 100755
--- a/linux-x86/lib64/liblog.so
+++ b/linux-x86/lib64/liblog.so
Binary files differ
diff --git a/linux-x86/lib64/libprotobuf-cpp-full.so b/linux-x86/lib64/libprotobuf-cpp-full.so
index 9cf876e..05e00f5 100755
--- a/linux-x86/lib64/libprotobuf-cpp-full.so
+++ b/linux-x86/lib64/libprotobuf-cpp-full.so
Binary files differ
diff --git a/linux-x86/lib64/libxml2.so.2.9.10 b/linux-x86/lib64/libxml2.so.2.9.10
new file mode 100644
index 0000000..9c2b702
--- /dev/null
+++ b/linux-x86/lib64/libxml2.so.2.9.10
Binary files differ
diff --git a/linux-x86/lib64/libz-host.so b/linux-x86/lib64/libz-host.so
index 6c47646..3f20793 100755
--- a/linux-x86/lib64/libz-host.so
+++ b/linux-x86/lib64/libz-host.so
Binary files differ
diff --git a/linux-x86/lib64/libziparchive.so b/linux-x86/lib64/libziparchive.so
index b8edf51..b700e96 100755
--- a/linux-x86/lib64/libziparchive.so
+++ b/linux-x86/lib64/libziparchive.so
Binary files differ
diff --git a/manifest.xml b/manifest.xml
index 767b5ac..ec0c625 100644
--- a/manifest.xml
+++ b/manifest.xml
@@ -5,61 +5,61 @@
 
   <default remote="aosp" revision="master" />
 
-  <project name="platform/development" path="development" revision="2400a70623e7b1a8b2fc8f37aa6be42767ddea9c" />
+  <project name="platform/development" path="development" revision="8d6aeab88209798a45b03f7e52c85bc6665b510b" />
 
-  <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" revision="90e45fec56fdeac84c9eb174c63fa6e748c6faa9" />
+  <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" revision="70a8d2affe5a31cbb6ae03469a83981b3331c9a4" />
 
   <project name="platform/external/go-creachadair-stringset" path="external/go-creachadair-stringset" revision="580fe893c0cd9bc1cb41a735b6ff456901f00e83" />
 
-  <project name="platform/build/soong" path="build/soong" revision="0ae555df1a5b0fc86a6fa83c1de42026ef184cd2">
+  <project name="platform/build/soong" path="build/soong" revision="70bc5454079dd5a99fa5342990683524b327e954">
     <linkfile dest="Android.bp" src="root.bp" />
 
     <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
 </project>
 
-  <project name="platform/external/ninja" path="external/ninja" revision="add90eee261445c62c3dabfdaf1bdd425310dd49" />
+  <project name="platform/external/ninja" path="external/ninja" revision="4b3e54d29302e0accfab963fc1af21b2d44771d1" />
 
-  <project name="platform/build/blueprint" path="build/blueprint" revision="d877dde242324136778d4114161cd1428428c6dc" />
+  <project name="platform/build/blueprint" path="build/blueprint" revision="e0118ca809f8c533c3c6423585545360c88ed4da" />
 
   <project name="platform/external/rapidjson" path="external/rapidjson" revision="9fa2a3d9e356a1f42a6184dcf1e0508ddfa9dbfb" />
 
-  <project name="platform/external/jsoncpp" path="external/jsoncpp" revision="60f1d4392708db183229cdc306d299404794f4a5" />
+  <project name="platform/external/jsoncpp" path="external/jsoncpp" revision="f5b9450d34b337a4620fcd1e8099782139cfbc70" />
 
-  <project name="platform/external/googletest" path="external/googletest" revision="de13bdb524c9d888b24a56490742227d9aabad12" />
+  <project name="platform/external/googletest" path="external/googletest" revision="ee3fca52fa6409aacab73bf519633d21a3a3a738" />
 
   <project name="platform/external/regex-re2" path="external/regex-re2" revision="84e28962b2c2f357b5daccb460501b169193fafe" />
 
-  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="05e86d0d7ce26f17daa89f5c1d708e29f551dcde" />
+  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="48542e4070ba33560f29ee5175a6f8490d11633c" />
 
-  <project name="platform/bionic" path="bionic" revision="b5f301eaafdc23f05fe5558fb0406a0ab04fc807" />
+  <project name="platform/bionic" path="bionic" revision="08a43426a0bcbc53cd2a33b3624bc6b3f7176117" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="7f4776e1ddb05395a4025a30de8d7b78d62ac9aa" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="fa2108d8e617e4f7d2585c27e18c09e65cd73ff8" />
 
-  <project name="platform/external/zlib" path="external/zlib" revision="63d1b8aa46f71519f18c335e61d1945e9dca56ac" />
+  <project name="platform/external/zlib" path="external/zlib" revision="28b8c108faa203d7f3675e2fa40fa0c11239c0a7" />
 
   <project name="platform/external/golang-x-tools" path="external/golang-x-tools" revision="bcc6484babb4e999e4bdb6a982bfa7e92fc640d2" />
 
-  <project name="platform/external/zopfli" path="external/zopfli" revision="b481a8f686da1cb148ff09eb10ca02198c6bdb49" />
+  <project name="platform/external/zopfli" path="external/zopfli" revision="3c7853623a3fd85d952b5e4180c800b04c09669e" />
 
   <project name="platform/external/go-cmp" path="external/go-cmp" revision="0f7c828c80e325c9fc2af078ffb108362bb84c15" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="748609c914d883228a9381c6be86623984d68a2b" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="5367c0b6f8ef3536becae14b3f3713390372abfa" />
 
-  <project name="platform/system/libbase" path="system/libbase" revision="6216488884f4efef1b70ce9738b0926d9999bce2" />
+  <project name="platform/system/libbase" path="system/libbase" revision="f927d5ca7e028eff433e068fd3c2f2954b5b00cf" />
 
-  <project clone-depth="1" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" revision="e4fb089b45a6633baf0233e2c524d614ed7355f1" />
+  <project clone-depth="1" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" revision="671f6a7cab9a313c08d1925b46264786c93feb5f" />
 
-  <project name="platform/external/kythe" path="external/kythe" revision="d2ba966907924219950c3d416e66036c7124fa39" />
+  <project name="platform/external/kythe" path="external/kythe" revision="9e960add0dc4d3798329897313793ee3d46dff3f" />
 
-  <project name="platform/external/libcxx" path="external/libcxx" revision="1447d718cab6c1a5b6a2b66ab77a39148b2caf13" />
+  <project name="platform/external/libcxx" path="external/libcxx" revision="9057c2f748e6005253cab91cc0e98d2a00d41acc" />
 
-  <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="2e925e97fef694474f6984c3edef8a9f348affa9" />
+  <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="ec522029127e610390f9a469dc135cf12495718c" />
 
   <project clone-depth="1" groups="linux" name="platform/prebuilts/ninja/linux-x86" path="prebuilts/ninja/linux-x86" revision="6369b19fc3fbe765636af75d394627e2b92599ed" />
 
-  <project name="platform/external/clang" path="external/clang" revision="382efa3f0dd36a342e30e8bf06af717e0664de3f" />
+  <project name="platform/external/clang" path="external/clang" revision="10d160c2c39d0b0f33f9be135c4dc6349e996eb8" />
 
-  <project name="platform/external/libcxxabi" path="external/libcxxabi" revision="72a9c9f1f20747fa2899b964d30c8d117de54a04" />
+  <project name="platform/external/libcxxabi" path="external/libcxxabi" revision="3364b1d8fc2d3447d44d9ae44e0eeb8fd4107fba" />
 
   <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" revision="d9aafaade740ca38612c742f6d87debf362132ea" />
 
@@ -67,59 +67,59 @@
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/headers" path="prebuilts/gcc/darwin-x86/host/headers" revision="4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68" />
 
-  <project name="platform/system/libziparchive" path="system/libziparchive" revision="7679e7393598c358b0abf605624c2e0dcd577ce0" />
+  <project name="platform/system/libziparchive" path="system/libziparchive" revision="5357e00bea027e10fffdcf94e70054f1d77c20b0" />
 
-  <project name="platform/external/libunwind" path="external/libunwind" revision="2674521dff29c84e9b37b28243fdaf9ca24620cd" />
+  <project name="platform/external/libunwind" path="external/libunwind" revision="a3af76adb4715dd6e2f40cc7f165e52670a72a9a" />
 
-  <project name="platform/system/core" path="system/core" revision="4203129353478c49708ea6a5a9658ab88ea309e3" />
+  <project name="platform/system/core" path="system/core" revision="3f452134da2a4d4578b46a254f41dbd008a72f82" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="03d80bda09b54abcf69ff647f5a8c869df6361d0" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="28187863afd843d34a8595017d509f0caa487c93" />
 
-  <project clone-depth="3" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="441f03682caa769215949156a2b58d7b1806a816" />
+  <project clone-depth="3" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="75439a9e6bf9ae45ff41bb250b2b59d34ec3efe5" />
 
-  <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="b34db27e38b1b60c83ab7c3a1985203c3f98281b" />
+  <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="5c0a66170926160524118a1b5d5e5bd6d697898e" />
 
-  <project clone-depth="1" name="platform/prebuilts/jdk/jdk9" path="prebuilts/jdk/jdk9" revision="3447ef32fa4961229942fa345c8707c7f9e00246" />
+  <project clone-depth="1" name="platform/prebuilts/jdk/jdk9" path="prebuilts/jdk/jdk9" revision="b113c3808dea0cc1ef161e235f42705592971f82" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" revision="b616fc20ee024ace07f39b13a1f5a2bbdb67f8f7" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" revision="353332b8c310174c2dff55db520349cff40b1295" />
 
-  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="2e31e5f83d4ef377099c9e71427b7e31e1da145f" />
+  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="cb2888033cf654029351044bd4392fc330f319e5" />
 
-  <project name="platform/external/compiler-rt" path="external/compiler-rt" revision="83c5095a0c8a7a90930e2ba427fdb64115402e21" />
+  <project name="platform/external/compiler-rt" path="external/compiler-rt" revision="e1df04d40038fc940d1a35f8bc3145f91c014908" />
 
   <project name="platform/external/golang-x-sync" path="external/golang-x-sync" revision="6ec98728804a67c2ee80bbfffae3233a5b46168d" />
 
   <project name="platform/external/go-creachadair-shell" path="external/go-creachadair-shell" revision="97d772a0b6c62d6bfac71224c4fdbf896a5478ba" />
 
-  <project name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="b2aff48eedb8ca162aa438135add08d915540925" />
+  <project name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="5011a847022edf4c12a2dd20f827374abc93d51f" />
 
-  <project name="platform/external/fmtlib" path="external/fmtlib" revision="bb56bd984c93597a8b2a90931351aef89d7edd18" />
+  <project name="platform/external/fmtlib" path="external/fmtlib" revision="61708eeb46d540d7b31944be926ef3c9b5123496" />
 
-  <project name="platform/external/llvm" path="external/llvm" revision="26b4ad25609ef0f620d6dd9a4102e9fb20b34240" />
+  <project name="platform/external/llvm" path="external/llvm" revision="d48ce923680e694bbb884ecc7d13a0d1725d2061" />
 
-  <project groups="pdk" name="platform/external/gflags" path="external/gflags" revision="6827298aeef132e2545279ed2ac5d0b74fa18e7a" />
+  <project groups="pdk" name="platform/external/gflags" path="external/gflags" revision="f3c8cc25b57a8161128d8c2bb4cc392226d4a72b" />
 
-  <project name="platform/build" path="build/make" revision="faac5a2e7d9482b4002f3e26759aa015031f2e51">
+  <project name="platform/build" path="build/make" revision="2f171ac0a5586fa9f3dd7859d542c64a185228ca">
     <linkfile dest="build/tools" src="tools" />
 </project>
 
-  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="cd9a0395ecbd7a288389659825b887337e011fad" />
+  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="01cc4c919daa1ad203ba64a4c54a293399820166" />
 
   <project name="platform/external/go-subcommands" path="external/go-subcommands" revision="409adebbdb103fb745f56343d7345b57c8ea13aa" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="b40fb300258802b9a6c56ce0d33ca8dfd3e665b1" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="0bd4f978e2f563979a873cac2126a4090bb60e67" />
 
-  <project name="platform/external/protobuf" path="external/protobuf" revision="61802769fdb4ef36599d1beeedb5acb9b7c43d50" />
+  <project name="platform/external/protobuf" path="external/protobuf" revision="43e6f74c3dd6bb1f6e97111e31206f8cb8c7a438" />
 
   <project clone-depth="1" name="platform/prebuilts/jdk/jdk8" path="prebuilts/jdk/jdk8" revision="74e4f1844dfa9b8df9e0fe2ff34a2ecc24d52b07" />
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/ninja/darwin-x86" path="prebuilts/ninja/darwin-x86" revision="00f798346dedb4a7a6a6dcc9ad32ff09d66ee0db" />
 
-  <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="f15d0f01f0db905e60300cb9d4556bf5e3a5bff2" />
+  <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="30b69e13b4c8f6273523290bf1724784bab8ed55" />
 
-  <project name="platform/dalvik" path="dalvik" revision="d236cfb4cf2b757c6ed728688a9034ef88338fc6" />
+  <project name="platform/dalvik" path="dalvik" revision="f73f651cea3087424db1179cf72a7b67ff537715" />
 
-  <project name="platform/external/abseil-cpp" path="external/abseil-cpp" revision="f194138dceced5005962a11ad5bda7f485f05282" />
+  <project name="platform/external/abseil-cpp" path="external/abseil-cpp" revision="2b33f4af7208b9b14d18c001e8175aae97afa7ed" />
 
   <project name="platform/external/go-etree" path="external/go-etree" revision="7fa46d9c9eb9134443a7fc56a354f07b15fc3a76" />
 </manifest>